pydaptivefiltering
# pydaptivefiltering/__init__.py
"""Top-level package exports for pydaptivefiltering.

Re-exports all adaptive-filter implementations and provides `info()`, a
textual overview of the algorithm coverage. Chapter numbering follows
'Adaptive Filtering' by Paulo S. R. Diniz.
"""

from .base import AdaptiveFilter
from .lms import *
from .rls import *
from .set_membership import *
from .lattice import *
from .fast_rls import *
from .qr_decomposition import *
from .iir import *
from .nonlinear import *
from .subband import *
from .blind import *
from .kalman import *

__version__ = "0.9"
__author__ = "BruninLima"

__all__ = ["AdaptiveFilter",
           "LMS", "NLMS", "AffineProjection", "SignData", "SignError", "DualSign",
           "LMSNewton", "Power2ErrorLMS", "TDomainLMS", "TDomainDCT", "TDomainDFT",
           "RLS", "RLSAlt",
           "SMNLMS", "SMBNLMS", "SMAffineProjection", "Simplified_SMAP", "Simplified_PUAP",
           "LRLSPosteriori", "LRLSErrorFeedback", "LRLSPriori", "NormalizedLRLS",
           "FastRLS", "StabFastRLS",
           "QRRLS",
           "ErrorEquation", "GaussNewton", "GaussNewtonGradient", "RLSIIR", "SteiglitzMcBride",
           "BilinearRLS", "ComplexRBF", "MultilayerPerceptron", "RBF", "VolterraLMS", "VolterraRLS",
           "CFDLMS", "DLCLLMS", "OLSBLMS",
           "AffineProjectionCM", "CMA", "Godard", "Sato",
           "Kalman",
           "info"]


def info():
    """Print an overview of the library's algorithm coverage.

    Chapter ("Cap") numbering follows 'Adaptive Filtering' by Paulo S. R. Diniz.
    """
    print("\n" + "="*70)
    print(" PyDaptive Filtering - Complete Library Overview")
    print(" Reference: 'Adaptive Filtering' by Paulo S. R. Diniz")
    print("="*70)
    sections = {
        "Cap 3/4 (LMS)": "LMS, NLMS, Affine Projection, Sign Algorithms, Transform Domain",
        "Cap 5 (RLS)": "Standard RLS, Alternative RLS",
        "Cap 6 (Set-Membership)": "SM-NLMS, BNLMS, SM-AP, Simplified AP/PUAP",
        "Cap 7 (Lattice RLS)": "LRLS (Posteriori, Priori, Error Feedback), NLRLS",
        "Cap 8 (Fast RLS)": "Fast Transversal RLS, Stabilized FTRLS",
        "Cap 9 (QR)": "QR-Decomposition Based RLS",
        # Fixed typo: "Steinglitz" -> "Steiglitz" (matches SteiglitzMcBride in __all__).
        "Cap 10 (IIR)": "Error Equation, Gauss-Newton, Steiglitz-McBride, RLS-IIR",
        "Cap 11 (Nonlinear)": "Volterra (LMS/RLS), MLP, RBF, Bilinear RLS",
        "Cap 12 (Subband)": "CFDLMS, DLCLLMS, OLSBLMS",
        "Cap 13 (Blind)": "CMA, Godard, Sato, Blind Affine Projection",
        "Cap 17 (Kalman)": "Kalman Filter",
    }
    for cap, algs in sections.items():
        print(f"\n{cap:25}: {algs}")

    print("\n" + "-"*70)
    print("Usage example: from pydaptivefiltering import LMS")
    print("Documentation: help(pydaptivefiltering.LMS)")
    print("="*70 + "\n")
class AdaptiveFilter(ABC):
    """Abstract base class for all adaptive filters.

    Parameters
    ----------
    filter_order:
        Order in the FIR sense (number of taps - 1). Non-FIR structures may
        use it as a generic size indicator for base allocation.
    w_init:
        Initial coefficient vector; zeros when None.

    Notes
    -----
    - Subclasses that handle complex-valued data should set
      ``supports_complex = True``.
    - Subclasses are expected to call ``_record_history()`` once per
      iteration (or use helper methods) if they want coefficient trajectories.
    """

    supports_complex: bool = False

    def __init__(self, filter_order: int, w_init: Optional[ArrayLike] = None) -> None:
        self.filter_order: int = int(filter_order)
        # Complex storage only when the subclass declares complex support.
        self._dtype = complex if self.supports_complex else float

        n_taps = self.filter_order + 1
        self.regressor: np.ndarray = np.zeros(n_taps, dtype=self._dtype)

        if w_init is None:
            self.w: np.ndarray = np.zeros(n_taps, dtype=self._dtype)
        else:
            self.w = np.asarray(w_init, dtype=self._dtype)

        self.w_history: List[np.ndarray] = []
        self._record_history()

    def _record_history(self) -> None:
        """Append a snapshot of the current coefficient vector to the history."""
        self.w_history.append(np.asarray(self.w).copy())

    def _final_coeffs(self, coefficients: Any) -> Any:
        """Return the last coefficient set from a history container (list or 2D array)."""
        if coefficients is None:
            return None
        if isinstance(coefficients, list) and coefficients:
            return coefficients[-1]
        try:
            arr = np.asarray(coefficients)
            if arr.ndim == 2:
                return arr[-1, :]
        except Exception:
            # Not array-like; fall through and return unchanged.
            pass
        return coefficients

    def _pack_results(
        self,
        outputs: np.ndarray,
        errors: np.ndarray,
        runtime_s: float,
        error_type: str = "a_priori",
        extra: Optional[Dict[str, Any]] = None,
    ) -> OptimizationResult:
        """Package outputs/errors/history into a standardized OptimizationResult."""
        return OptimizationResult(
            outputs=np.asarray(outputs),
            errors=np.asarray(errors),
            coefficients=np.asarray(self.w_history),
            algorithm=self.__class__.__name__,
            runtime_ms=1000.0 * float(runtime_s),
            error_type=str(error_type),
            extra=extra,
        )

    def filter_signal(self, input_signal: ArrayLike) -> np.ndarray:
        """Filter an input signal using the current coefficients.

        Assumes an FIR structure with taps ``self.w``, regressor convention
        x_k = [x[k], x[k-1], ..., x[k-m]] and output y[k] = w^H x_k
        (Hermitian inner product for complex data).
        """
        signal = np.asarray(input_signal, dtype=self._dtype)
        n = signal.size
        order = self.filter_order

        # Zero prehistory so the first regressors are well defined.
        padded = np.concatenate((np.zeros(order, dtype=self._dtype), signal))

        out = np.zeros(n, dtype=self._dtype)
        w_conj = self.w.conj()
        for k in range(n):
            window = padded[k : k + order + 1]
            out[k] = np.dot(w_conj, window[::-1])
        return out

    @classmethod
    def default_test_init_kwargs(cls, order: int) -> dict:
        """Subclasses may override to supply init kwargs for standardized tests."""
        return {}

    @abstractmethod
    def optimize(
        self,
        input_signal: ArrayLike,
        desired_signal: ArrayLike,
        **kwargs: Any,
    ) -> Any:
        """Run the adaptation procedure.

        Implementations should return an OptimizationResult (recommended), or
        a dict-like with standardized keys when migrating older code.
        """
        raise NotImplementedError

    def reset_filter(self, w_new: Optional[ArrayLike] = None) -> None:
        """Reset coefficients (to `w_new` or zeros) and restart the history."""
        if w_new is None:
            self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)
        else:
            self.w = np.asarray(w_new, dtype=self._dtype)
        self.w_history = []
        self._record_history()
Abstract base class for all adaptive filters.
Parameters
filter_order: Order in the FIR sense (number of taps - 1). For non-FIR structures, it can be used as a generic size indicator for base allocation. w_init: Initial coefficient vector. If None, initialized to zeros.
Notes
- Subclasses should set
`supports_complex = True` if they support complex-valued data. - Subclasses are expected to call
`_record_history()` every iteration (or use helper methods) if they want coefficient trajectories.
def filter_signal(self, input_signal: ArrayLike) -> np.ndarray:
    """Filter an input signal using the current coefficients.

    Default implementation assumes an FIR structure with taps ``self.w`` and
    regressor convention x_k = [x[k], x[k-1], ..., x[k-m]], producing
    y[k] = w^H x_k (Hermitian for complex data).
    """
    signal = np.asarray(input_signal, dtype=self._dtype)
    count = signal.size
    taps = self.filter_order + 1

    # Prepend zeros so early samples have a full (zero-padded) regressor.
    padded = np.zeros(count + self.filter_order, dtype=self._dtype)
    padded[self.filter_order:] = signal

    result = np.zeros(count, dtype=self._dtype)
    for idx in range(count):
        window = padded[idx : idx + taps][::-1]
        result[idx] = np.dot(self.w.conj(), window)

    return result
Filter an input signal using current coefficients.
Default implementation assumes an FIR structure with taps self.w and
regressor convention:
x_k = [x[k], x[k-1], ..., x[k-m]]
and output:
y[k] = w^H x_k (Hermitian for complex)
@classmethod
def default_test_init_kwargs(cls, order: int) -> dict:
    """Return init kwargs for standardized tests; subclasses override as needed."""
    return {}
Override in subclasses to provide init kwargs for standardized tests.
@abstractmethod
def optimize(
    self,
    input_signal: ArrayLike,
    desired_signal: ArrayLike,
    **kwargs: Any,
) -> Any:
    """Run the adaptation procedure.

    Implementations should return either an OptimizationResult (recommended)
    or a dict-like with standardized keys when migrating older code.
    """
    raise NotImplementedError
Run the adaptation procedure.
Subclasses should return either:
- OptimizationResult (recommended), or
- dict-like with standardized keys, if you are migrating older code.
def reset_filter(self, w_new: Optional[ArrayLike] = None) -> None:
    """Reset the coefficient vector (to `w_new` or zeros) and restart the history."""
    if w_new is None:
        self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)
    else:
        self.w = np.asarray(w_new, dtype=self._dtype)
    self.w_history = []
    self._record_history()
Reset coefficients and history.
class LMS(AdaptiveFilter):
    """
    Complex LMS (Least-Mean Squares).

    Implements the complex LMS recursion (Algorithm 3.2 - Diniz) for adaptive
    FIR filtering.

    Notes
    -----
    - Complex-valued implementation (supports_complex = True).
    - Uses the unified base API via ``@validate_input``, so ``optimize`` may
      be called as optimize(input_signal=..., desired_signal=...),
      optimize(x=..., d=...) or optimize(x, d).
    - Returns the a priori error e[k] = d[k] - y[k] with y[k] = w[k]^H x_k,
      then updates w[k+1] = w[k] + mu * conj(e[k]) * x_k.
    """

    supports_complex: bool = True

    step_size: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (the filter has M+1 taps).
        step_size:
            Adaptation step-size (mu).
        w_init:
            Optional initial coefficients (length M+1); zeros when None.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run LMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            When True, prints the runtime.

        Returns
        -------
        OptimizationResult
            outputs hold y[k], errors hold the a priori error
            e[k] = d[k] - y[k], coefficients come from the base-class history
            and error_type is "a_priori".
        """
        start = perf_counter()

        x_vec = np.asarray(input_signal, dtype=complex).ravel()
        d_vec = np.asarray(desired_signal, dtype=complex).ravel()

        total = int(x_vec.size)
        order = int(self.filter_order)

        y_out = np.zeros(total, dtype=complex)
        e_out = np.zeros(total, dtype=complex)

        # Zero prehistory so the first regressors are well defined.
        buffer = np.zeros(total + order, dtype=complex)
        buffer[order:] = x_vec

        for k in range(total):
            regressor = buffer[k : k + order + 1][::-1]

            y_now = complex(np.vdot(self.w, regressor))
            y_out[k] = y_now

            e_now = d_vec[k] - y_now
            e_out[k] = e_now

            self.w = self.w + self.step_size * np.conj(e_now) * regressor
            self._record_history()

        runtime_s = perf_counter() - start
        if verbose:
            print(f"[LMS] Completed in {runtime_s * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_out,
            errors=e_out,
            runtime_s=runtime_s,
            error_type="a_priori",
        )
Complex LMS (Least-Mean Squares).
Implements the complex LMS recursion (Algorithm 3.2 - Diniz) for adaptive FIR filtering.
Notes
- Complex-valued implementation (supports_complex = True).
- Uses the unified base API via
`@validate_input`: - optimize(input_signal=..., desired_signal=...)
- optimize(x=..., d=...)
- optimize(x, d)
- This implementation returns the a priori error: e[k] = d[k] - y[k] where y[k] = w[k]^H x_k, and then updates: w[k+1] = w[k] + mu * conj(e[k]) * x_k
def __init__(
    self,
    filter_order: int,
    step_size: float = 1e-2,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Parameters
    ----------
    filter_order:
        FIR order M (the filter has M+1 taps).
    step_size:
        Adaptation step-size (mu).
    w_init:
        Optional initial coefficients (length M+1); zeros when None.
    """
    super().__init__(filter_order=int(filter_order), w_init=w_init)
    self.step_size = float(step_size)
Parameters
filter_order: FIR order M (number of taps is M+1). step_size: Step-size (mu). w_init: Optional initial coefficients (length M+1). If None, zeros.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
) -> OptimizationResult:
    """Run LMS adaptation.

    outputs hold y[k] = w[k]^H x_k, errors hold the a priori error
    e[k] = d[k] - y[k]; coefficients come from the base-class history and
    error_type is "a_priori". When `verbose` is True the runtime is printed.
    """
    t_start = perf_counter()

    sig = np.asarray(input_signal, dtype=complex).ravel()
    ref = np.asarray(desired_signal, dtype=complex).ravel()

    count = int(sig.size)
    order = int(self.filter_order)

    outs = np.zeros(count, dtype=complex)
    errs = np.zeros(count, dtype=complex)

    # Zero-padded delay line for the first regressors.
    delayed = np.zeros(count + order, dtype=complex)
    delayed[order:] = sig

    for k in range(count):
        reg = delayed[k : k + order + 1][::-1]

        y_val = complex(np.vdot(self.w, reg))
        outs[k] = y_val

        e_val = ref[k] - y_val
        errs[k] = e_val

        self.w = self.w + self.step_size * np.conj(e_val) * reg
        self._record_history()

    runtime_s = perf_counter() - t_start
    if verbose:
        print(f"[LMS] Completed in {runtime_s * 1000:.03f} ms")

    return self._pack_results(
        outputs=outs,
        errors=errs,
        runtime_s=runtime_s,
        error_type="a_priori",
    )
Run LMS adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime.
Returns
OptimizationResult outputs: Filter output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
class NLMS(AdaptiveFilter):
    """
    Complex NLMS (Normalized Least-Mean Squares).

    Normalized LMS recursion (Algorithm 4.3 - Diniz) for adaptive FIR
    filtering, with the step normalized by the regressor energy:

        y[k]   = w[k]^H x_k
        e[k]   = d[k] - y[k]
        mu_k   = mu / (||x_k||^2 + gamma)
        w[k+1] = w[k] + mu_k * conj(e[k]) * x_k

    Notes
    -----
    - Complex-valued implementation (supports_complex = True).
    - Uses the unified base API via ``@validate_input``.
    - ``gamma`` regularizes the normalization to avoid division by zero.
    """

    supports_complex: bool = True

    step_size: float
    gamma: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        gamma: float = 1e-6,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (the filter has M+1 taps).
        step_size:
            Base step-size (mu).
        gamma:
            Regularization against division by (near) zero.
        w_init:
            Optional initial coefficients (length M+1); zeros when None.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)
        self.gamma = float(gamma)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run NLMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            When True, prints the runtime.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k]; errors: a priori error
            e[k] = d[k] - y[k]; coefficients: history kept by the base class;
            error_type: "a_priori".
        """
        started = perf_counter()

        x_vec = np.asarray(input_signal, dtype=complex).ravel()
        d_vec = np.asarray(desired_signal, dtype=complex).ravel()

        total = int(x_vec.size)
        order = int(self.filter_order)

        y_out = np.zeros(total, dtype=complex)
        e_out = np.zeros(total, dtype=complex)

        # Zero prehistory so the first regressors are well defined.
        buffer = np.zeros(total + order, dtype=complex)
        buffer[order:] = x_vec

        for k in range(total):
            regressor = buffer[k : k + order + 1][::-1]

            y_now = complex(np.vdot(self.w, regressor))
            y_out[k] = y_now

            e_now = d_vec[k] - y_now
            e_out[k] = e_now

            # Per-sample step normalized by the regressor energy.
            energy = float(np.vdot(regressor, regressor).real)
            step_now = self.step_size / (energy + self.gamma)

            self.w = self.w + step_now * np.conj(e_now) * regressor
            self._record_history()

        runtime_s = perf_counter() - started
        if verbose:
            print(f"[NLMS] Completed in {runtime_s * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_out,
            errors=e_out,
            runtime_s=runtime_s,
            error_type="a_priori",
        )
Complex NLMS (Normalized Least-Mean Squares).
Implements the normalized LMS recursion (Algorithm 4.3 - Diniz) for adaptive FIR filtering. The update uses a step normalized by the regressor energy:
y[k] = w[k]^H x_k
e[k] = d[k] - y[k]
mu_k = mu / (||x_k||^2 + gamma)
w[k+1] = w[k] + mu_k * conj(e[k]) * x_k
Notes
- Complex-valued implementation (supports_complex = True).
- Uses the unified base API via
`@validate_input`. - `gamma` is a regularization constant to avoid division by zero.
def __init__(
    self,
    filter_order: int,
    step_size: float = 1e-2,
    gamma: float = 1e-6,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Parameters
    ----------
    filter_order:
        FIR order M (the filter has M+1 taps).
    step_size:
        Base step-size (mu).
    gamma:
        Regularization against division by (near) zero.
    w_init:
        Optional initial coefficients (length M+1); zeros when None.
    """
    super().__init__(filter_order=int(filter_order), w_init=w_init)
    self.step_size = float(step_size)
    self.gamma = float(gamma)
Parameters
filter_order: FIR order M (number of taps is M+1). step_size: Base step-size (mu). gamma: Regularization to avoid division by (near) zero. w_init: Optional initial coefficients (length M+1). If None, zeros.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
) -> OptimizationResult:
    """Run NLMS adaptation.

    outputs hold y[k] = w[k]^H x_k, errors hold the a priori error
    e[k] = d[k] - y[k]; the step is normalized per sample by the regressor
    energy plus `gamma`. coefficients come from the base-class history and
    error_type is "a_priori". When `verbose` is True the runtime is printed.
    """
    t_start = perf_counter()

    sig = np.asarray(input_signal, dtype=complex).ravel()
    ref = np.asarray(desired_signal, dtype=complex).ravel()

    count = int(sig.size)
    order = int(self.filter_order)

    outs = np.zeros(count, dtype=complex)
    errs = np.zeros(count, dtype=complex)

    # Zero-padded delay line for the first regressors.
    delayed = np.zeros(count + order, dtype=complex)
    delayed[order:] = sig

    for k in range(count):
        reg = delayed[k : k + order + 1][::-1]

        y_val = complex(np.vdot(self.w, reg))
        outs[k] = y_val

        e_val = ref[k] - y_val
        errs[k] = e_val

        # Normalize the step by the (regularized) regressor energy.
        power = float(np.vdot(reg, reg).real)
        mu_now = self.step_size / (power + self.gamma)

        self.w = self.w + mu_now * np.conj(e_val) * reg
        self._record_history()

    runtime_s = perf_counter() - t_start
    if verbose:
        print(f"[NLMS] Completed in {runtime_s * 1000:.03f} ms")

    return self._pack_results(
        outputs=outs,
        errors=errs,
        runtime_s=runtime_s,
        error_type="a_priori",
    )
Run NLMS adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime.
Returns
OptimizationResult outputs: Filter output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
class AffineProjection(AdaptiveFilter):
    """
    Complex Affine Projection Algorithm (APA).

    Implements Algorithm 4.6 (Diniz) using an affine-projection update with
    data reuse.

    Notes
    -----
    - This implementation supports complex-valued data (supports_complex=True).
    - The base decorator `@validate_input` allows calling optimize with:
        * optimize(input_signal=..., desired_signal=...)
        * optimize(x=..., d=...)
        * optimize(x, d)
    """

    supports_complex: bool = True

    step_size: float
    gamma: float
    memory_length: int

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        gamma: float = 1e-6,
        L: int = 2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        step_size:
            Step-size / relaxation factor (mu).
        gamma:
            Diagonal loading regularization to ensure invertibility.
        L:
            Data reuse factor (projection order). Uses L+1 past regressors.
        w_init:
            Optional initial weights (length M+1). If None, initializes to zeros.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)
        self.gamma = float(gamma)
        self.memory_length = int(L)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run APA adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns the last regressor matrix and last correlation
            matrix in result.extra.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k] (a priori); errors: e[k] = d[k] - y[k]
            (a priori); coefficients: coefficient history (self.w_history) as
            a 2D array; error_type: "a_priori"; extra (optional):
            last_regressor_matrix, last_correlation_matrix.
        """
        tic: float = perf_counter()

        # Convert once, directly to complex. (The previous implementation
        # converted the inputs twice: first without a dtype, then again
        # with dtype=complex — the first conversion was dead work.)
        dtype = complex
        x: np.ndarray = np.asarray(input_signal, dtype=dtype).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=dtype).ravel()

        n_samples: int = int(x.size)
        m: int = int(self.filter_order)
        L: int = int(self.memory_length)

        outputs: np.ndarray = np.zeros(n_samples, dtype=dtype)
        errors: np.ndarray = np.zeros(n_samples, dtype=dtype)

        # Zero prehistory so the first regressors are well defined.
        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=dtype)
        x_padded[m:] = x

        # Rows hold the L+1 most recent regressors (row 0 = newest).
        X_matrix: np.ndarray = np.zeros((L + 1, m + 1), dtype=dtype)
        D_vector: np.ndarray = np.zeros(L + 1, dtype=dtype)

        last_corr: Optional[np.ndarray] = None

        eye_L: np.ndarray = np.eye(L + 1, dtype=dtype)

        for k in range(n_samples):
            # Shift the data-reuse window and insert the newest samples.
            X_matrix[1:] = X_matrix[:-1]
            X_matrix[0] = x_padded[k : k + m + 1][::-1]

            D_vector[1:] = D_vector[:-1]
            D_vector[0] = d[k]

            Y_vector: np.ndarray = X_matrix @ self.w.conj()
            E_vector: np.ndarray = D_vector - Y_vector

            outputs[k] = Y_vector[0]
            errors[k] = E_vector[0]

            # Regularized correlation matrix (diagonal loading via gamma).
            corr_matrix: np.ndarray = (X_matrix @ X_matrix.conj().T) + (self.gamma * eye_L)
            last_corr = corr_matrix

            try:
                u: np.ndarray = np.linalg.solve(corr_matrix, E_vector)
            except np.linalg.LinAlgError:
                # Fall back to the pseudo-inverse when the system is singular.
                u = np.linalg.pinv(corr_matrix) @ E_vector

            self.w = self.w + self.step_size * (X_matrix.conj().T @ u)
            self._record_history()

        runtime_s: float = perf_counter() - tic
        if verbose:
            print(f"[AffineProjection] Completed in {runtime_s * 1000:.02f} ms")

        extra = None
        if return_internal_states:
            extra = {
                "last_regressor_matrix": X_matrix.copy(),
                "last_correlation_matrix": None if last_corr is None else last_corr.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Complex Affine Projection Algorithm (APA).
Implements Algorithm 4.6 (Diniz) using an affine-projection update with data reuse.
Notes
- This implementation supports complex-valued data (supports_complex=True).
- The base decorator
`@validate_input` allows calling optimize with: - optimize(input_signal=..., desired_signal=...)
- optimize(x=..., d=...)
- optimize(x, d)
def __init__(
    self,
    filter_order: int,
    step_size: float = 1e-2,
    gamma: float = 1e-6,
    L: int = 2,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Parameters
    ----------
    filter_order:
        FIR order M (the filter has M+1 taps).
    step_size:
        Step-size / relaxation factor (mu).
    gamma:
        Diagonal loading regularization to keep the system invertible.
    L:
        Data reuse factor (projection order); L+1 past regressors are used.
    w_init:
        Optional initial weights (length M+1); zeros when None.
    """
    super().__init__(filter_order=int(filter_order), w_init=w_init)
    self.step_size = float(step_size)
    self.gamma = float(gamma)
    self.memory_length = int(L)
Parameters
filter_order: FIR order M (number of taps is M+1). step_size: Step-size / relaxation factor (mu). gamma: Diagonal loading regularization to ensure invertibility. L: Data reuse factor (projection order). Uses L+1 past regressors. w_init: Optional initial weights (length M+1). If None, initializes to zeros.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run APA adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns the last regressor matrix and last correlation
        matrix in result.extra.

    Returns
    -------
    OptimizationResult
        outputs: filter output y[k] (a priori); errors: e[k] = d[k] - y[k]
        (a priori); coefficients: coefficient history (self.w_history) as a
        2D array; error_type: "a_priori"; extra (optional):
        last_regressor_matrix, last_correlation_matrix.
    """
    tic: float = perf_counter()

    # Convert once, directly to complex (the previous version converted the
    # inputs twice: first without a dtype, then again with dtype=complex).
    dtype = complex
    x: np.ndarray = np.asarray(input_signal, dtype=dtype).ravel()
    d: np.ndarray = np.asarray(desired_signal, dtype=dtype).ravel()

    n_samples: int = int(x.size)
    m: int = int(self.filter_order)
    L: int = int(self.memory_length)

    outputs: np.ndarray = np.zeros(n_samples, dtype=dtype)
    errors: np.ndarray = np.zeros(n_samples, dtype=dtype)

    # Zero prehistory so the first regressors are well defined.
    x_padded: np.ndarray = np.zeros(n_samples + m, dtype=dtype)
    x_padded[m:] = x

    # Rows hold the L+1 most recent regressors (row 0 = newest).
    X_matrix: np.ndarray = np.zeros((L + 1, m + 1), dtype=dtype)
    D_vector: np.ndarray = np.zeros(L + 1, dtype=dtype)

    last_corr: Optional[np.ndarray] = None

    eye_L: np.ndarray = np.eye(L + 1, dtype=dtype)

    for k in range(n_samples):
        # Shift the data-reuse window and insert the newest samples.
        X_matrix[1:] = X_matrix[:-1]
        X_matrix[0] = x_padded[k : k + m + 1][::-1]

        D_vector[1:] = D_vector[:-1]
        D_vector[0] = d[k]

        Y_vector: np.ndarray = X_matrix @ self.w.conj()
        E_vector: np.ndarray = D_vector - Y_vector

        outputs[k] = Y_vector[0]
        errors[k] = E_vector[0]

        # Regularized correlation matrix (diagonal loading via gamma).
        corr_matrix: np.ndarray = (X_matrix @ X_matrix.conj().T) + (self.gamma * eye_L)
        last_corr = corr_matrix

        try:
            u: np.ndarray = np.linalg.solve(corr_matrix, E_vector)
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse when the system is singular.
            u = np.linalg.pinv(corr_matrix) @ E_vector

        self.w = self.w + self.step_size * (X_matrix.conj().T @ u)
        self._record_history()

    runtime_s: float = perf_counter() - tic
    if verbose:
        print(f"[AffineProjection] Completed in {runtime_s * 1000:.02f} ms")

    extra = None
    if return_internal_states:
        extra = {
            "last_regressor_matrix": X_matrix.copy(),
            "last_correlation_matrix": None if last_corr is None else last_corr.copy(),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Run APA adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns the last regressor matrix and last correlation matrix in result.extra.
Returns
OptimizationResult outputs: Filter output y[k] (a priori). errors: Error e[k] = d[k] - y[k] (a priori). coefficients: Coefficient history (self.w_history) as a 2D array. error_type: "a_priori". extra (optional): last_regressor_matrix, last_correlation_matrix.
class SignData(AdaptiveFilter):
    """
    Sign-Data LMS (complex-valued).

    Low-complexity LMS variant where the input regressor is replaced by its
    elementwise sign in the update:

        y[k] = w^H x_k
        e[k] = d[k] - y[k]
        w   <- w + 2 * mu * conj(e[k]) * sign(x_k)

    Notes
    -----
    - Complex-valued implementation (supports_complex=True).
    - Uses the unified base API via ``@validate_input``.
    - Returns the a priori error (e[k] is computed before the update).
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (the filter has M+1 taps).
        step_size:
            Step-size (mu).
        w_init:
            Optional initial coefficients (length M+1); zeros when None.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Sign-Data LMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            When True, prints the runtime.
        return_internal_states:
            When True, result.extra carries the last regressor sign vector.

        Returns
        -------
        OptimizationResult
            outputs: y[k]; errors: a priori error e[k] = d[k] - y[k];
            coefficients: history kept by the base class;
            error_type: "a_priori".
        """
        started = perf_counter()

        x_vec = np.asarray(input_signal, dtype=complex).ravel()
        d_vec = np.asarray(desired_signal, dtype=complex).ravel()

        total = int(x_vec.size)
        order = int(self.filter_order)

        y_out = np.zeros(total, dtype=complex)
        e_out = np.zeros(total, dtype=complex)

        # Zero prehistory so the first regressors are well defined.
        buffer = np.zeros(total + order, dtype=complex)
        buffer[order:] = x_vec

        latest_sign: Optional[np.ndarray] = None
        gain = 2.0 * self.step_size

        for k in range(total):
            regressor = buffer[k : k + order + 1][::-1]

            y_now = complex(np.vdot(self.w, regressor))
            y_out[k] = y_now

            e_now = d_vec[k] - y_now
            e_out[k] = e_now

            # Update direction uses the elementwise sign of the regressor.
            latest_sign = np.sign(regressor)
            self.w = self.w + gain * np.conj(e_now) * latest_sign
            self._record_history()

        runtime_s = float(perf_counter() - started)
        if verbose:
            print(f"[SignData] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {"last_sign_regressor": None if latest_sign is None else latest_sign.copy()}

        return self._pack_results(
            outputs=y_out,
            errors=e_out,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Sign-Data LMS (complex-valued).
This is a low-complexity LMS variant where the input regressor is replaced by its elementwise sign:
y[k] = w^H x_k
e[k] = d[k] - y[k]
w <- w + 2 * mu * conj(e[k]) * sign(x_k)
Notes
- Complex-valued implementation (supports_complex=True).
- Uses the unified base API via `@validate_input`.
- Returns a priori error by default (e[k] computed before update).
48 def __init__( 49 self, 50 filter_order: int, 51 step_size: float = 1e-2, 52 w_init: Optional[ArrayLike] = None, 53 ) -> None: 54 """ 55 Parameters 56 ---------- 57 filter_order: 58 FIR order M (number of taps is M+1). 59 step_size: 60 Step-size (mu). 61 w_init: 62 Optional initial coefficients (length M+1). If None, zeros. 63 """ 64 super().__init__(filter_order=int(filter_order), w_init=w_init) 65 self.step_size = float(step_size)
Parameters
filter_order: FIR order M (number of taps is M+1). step_size: Step-size (mu). w_init: Optional initial coefficients (length M+1). If None, zeros.
67 @validate_input 68 def optimize( 69 self, 70 input_signal: np.ndarray, 71 desired_signal: np.ndarray, 72 verbose: bool = False, 73 return_internal_states: bool = False, 74 ) -> OptimizationResult: 75 """ 76 Run Sign-Data LMS adaptation. 77 78 Parameters 79 ---------- 80 input_signal: 81 Input signal x[k]. 82 desired_signal: 83 Desired signal d[k]. 84 verbose: 85 If True, prints runtime. 86 return_internal_states: 87 If True, returns the last regressor sign vector in result.extra. 88 89 Returns 90 ------- 91 OptimizationResult 92 outputs: 93 Filter output y[k]. 94 errors: 95 A priori error e[k] = d[k] - y[k]. 96 coefficients: 97 Coefficient history stored in the base class. 98 error_type: 99 "a_priori". 100 """ 101 t0 = perf_counter() 102 103 x = np.asarray(input_signal, dtype=complex).ravel() 104 d = np.asarray(desired_signal, dtype=complex).ravel() 105 106 n_samples = int(x.size) 107 m = int(self.filter_order) 108 109 outputs = np.zeros(n_samples, dtype=complex) 110 errors = np.zeros(n_samples, dtype=complex) 111 112 x_padded = np.zeros(n_samples + m, dtype=complex) 113 x_padded[m:] = x 114 115 last_sign_xk: Optional[np.ndarray] = None 116 117 for k in range(n_samples): 118 x_k = x_padded[k : k + m + 1][::-1] 119 120 y_k = complex(np.vdot(self.w, x_k)) 121 outputs[k] = y_k 122 123 e_k = d[k] - y_k 124 errors[k] = e_k 125 126 sign_xk = np.sign(x_k) 127 last_sign_xk = sign_xk 128 129 self.w = self.w + (2.0 * self.step_size) * np.conj(e_k) * sign_xk 130 self._record_history() 131 132 runtime_s = float(perf_counter() - t0) 133 if verbose: 134 print(f"[SignData] Completed in {runtime_s * 1000:.03f} ms") 135 136 extra: Optional[Dict[str, Any]] = None 137 if return_internal_states: 138 extra = {"last_sign_regressor": None if last_sign_xk is None else last_sign_xk.copy()} 139 140 return self._pack_results( 141 outputs=outputs, 142 errors=errors, 143 runtime_s=runtime_s, 144 error_type="a_priori", 145 extra=extra, 146 )
Run Sign-Data LMS adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns the last regressor sign vector in result.extra.
Returns
OptimizationResult outputs: Filter output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: Coefficient history stored in the base class. error_type: "a_priori".
class SignError(AdaptiveFilter):
    """
    Sign-Error LMS adaptive filter (real-valued).

    Low-complexity LMS variant that replaces the error term in the update by
    its sign:

        y[k] = w^T x_k
        e[k] = d[k] - y[k]
        w   <- w + mu * sign(e[k]) * x_k

    Notes
    -----
    - Real-valued only: enforced by `ensure_real_signals`.
    - Uses the unified base API via `validate_input`.
    - The reported error is the a priori error (computed before the update).
    """

    supports_complex: bool = False

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        step_size:
            Step-size (mu).
        w_init:
            Optional initial coefficients (length M+1). If None, zeros.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Sign-Error LMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k] (real).
        desired_signal:
            Desired signal d[k] (real).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns the last sign(e[k]) value in result.extra.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k]; errors: a priori error
            e[k] = d[k] - y[k]; coefficients: history kept by the base class;
            error_type: "a_priori".
        """
        start = perf_counter()

        u = np.asarray(input_signal, dtype=np.float64).ravel()
        ref = np.asarray(desired_signal, dtype=np.float64).ravel()
        n = int(u.size)
        n_taps = int(self.filter_order) + 1

        y_hist = np.zeros(n, dtype=np.float64)
        e_hist = np.zeros(n, dtype=np.float64)

        # Prewindowed delay line, newest sample first (zeros before k=0).
        regressor = np.zeros(n_taps, dtype=np.float64)
        sgn: Optional[float] = None

        for k in range(n):
            regressor = np.concatenate(([u[k]], regressor[:-1]))

            y_hist[k] = np.dot(self.w, regressor)
            err = float(ref[k] - y_hist[k])
            e_hist[k] = err

            # Sign-error step: only the polarity of the error drives the update.
            sgn = float(np.sign(err))
            self.w = self.w + self.step_size * sgn * regressor
            self._record_history()

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[SignError] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {"last_sign_error": sgn}

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Sign-Error LMS (real-valued).
This is a sign-error LMS variant that replaces the error term by its sign:
y[k] = w^T x_k
e[k] = d[k] - y[k]
w <- w + mu * sign(e[k]) * x_k
Notes
- Real-valued only: enforced by `ensure_real_signals`.
- Uses the unified base API via `validate_input`.
- Returns a priori error (computed before update).
48 def __init__( 49 self, 50 filter_order: int, 51 step_size: float = 1e-2, 52 w_init: Optional[ArrayLike] = None, 53 ) -> None: 54 """ 55 Parameters 56 ---------- 57 filter_order: 58 FIR order M (number of taps is M+1). 59 step_size: 60 Step-size (mu). 61 w_init: 62 Optional initial coefficients (length M+1). If None, zeros. 63 """ 64 super().__init__(filter_order=int(filter_order), w_init=w_init) 65 self.step_size = float(step_size)
Parameters
filter_order: FIR order M (number of taps is M+1). step_size: Step-size (mu). w_init: Optional initial coefficients (length M+1). If None, zeros.
67 @validate_input 68 @ensure_real_signals 69 def optimize( 70 self, 71 input_signal: np.ndarray, 72 desired_signal: np.ndarray, 73 verbose: bool = False, 74 return_internal_states: bool = False, 75 ) -> OptimizationResult: 76 """ 77 Run Sign-Error LMS adaptation. 78 79 Parameters 80 ---------- 81 input_signal: 82 Input signal x[k] (real). 83 desired_signal: 84 Desired signal d[k] (real). 85 verbose: 86 If True, prints runtime. 87 return_internal_states: 88 If True, returns the last sign(e[k]) value in result.extra. 89 90 Returns 91 ------- 92 OptimizationResult 93 outputs: 94 Filter output y[k]. 95 errors: 96 A priori error e[k] = d[k] - y[k]. 97 coefficients: 98 Coefficient history stored in the base class. 99 error_type: 100 "a_priori". 101 """ 102 t0 = perf_counter() 103 104 x = np.asarray(input_signal, dtype=np.float64).ravel() 105 d = np.asarray(desired_signal, dtype=np.float64).ravel() 106 107 n_samples = int(x.size) 108 m = int(self.filter_order) 109 110 outputs = np.zeros(n_samples, dtype=np.float64) 111 errors = np.zeros(n_samples, dtype=np.float64) 112 113 x_padded = np.zeros(n_samples + m, dtype=np.float64) 114 x_padded[m:] = x 115 116 last_sign_e: Optional[float] = None 117 118 for k in range(n_samples): 119 x_k = x_padded[k : k + m + 1][::-1] 120 121 y_k = float(np.dot(self.w, x_k)) 122 outputs[k] = y_k 123 124 e_k = float(d[k] - y_k) 125 errors[k] = e_k 126 127 s = float(np.sign(e_k)) 128 last_sign_e = s 129 130 self.w = self.w + self.step_size * s * x_k 131 self._record_history() 132 133 runtime_s = float(perf_counter() - t0) 134 if verbose: 135 print(f"[SignError] Completed in {runtime_s * 1000:.03f} ms") 136 137 extra: Optional[Dict[str, Any]] = None 138 if return_internal_states: 139 extra = {"last_sign_error": last_sign_e} 140 141 return self._pack_results( 142 outputs=outputs, 143 errors=errors, 144 runtime_s=runtime_s, 145 error_type="a_priori", 146 extra=extra, 147 )
Run Sign-Error LMS adaptation.
Parameters
input_signal: Input signal x[k] (real). desired_signal: Desired signal d[k] (real). verbose: If True, prints runtime. return_internal_states: If True, returns the last sign(e[k]) value in result.extra.
Returns
OptimizationResult outputs: Filter output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: Coefficient history stored in the base class. error_type: "a_priori".
class DualSign(AdaptiveFilter):
    """
    Dual-Sign LMS adaptive filter (real-valued).

    Sign-error LMS variant that switches between two effective gains depending
    on the error magnitude, controlled by a threshold `rho`:

        e[k] = d[k] - y[k]
        u[k] = sign(e[k])          if |e[k]| <= rho
             = gamma * sign(e[k])  if |e[k]| >  rho
        w   <- w + 2 * mu * u[k] * x_k

    Notes
    -----
    - Real-valued only: enforced by `@ensure_real_signals`.
    - Uses the unified base API via `@validate_input`:
      * optimize(input_signal=..., desired_signal=...)
      * optimize(x=..., d=...)
      * optimize(x, d)
    """

    supports_complex: bool = False

    rho: float
    gamma: float
    step_size: float

    def __init__(
        self,
        filter_order: int,
        rho: float,
        gamma: float,
        step: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        rho:
            Threshold on |e[k]| that selects which sign gain is used.
        gamma:
            Gain multiplier used when |e[k]| > rho. Typically an integer > 1.
        step:
            Step-size (mu).
        w_init:
            Optional initial coefficients (length M+1). If None, zeros.
        safe_eps:
            Small epsilon for internal safety checks (kept for consistency
            across the library).
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.rho = float(rho)
        self.gamma = float(gamma)
        self.step_size = float(step)
        self._safe_eps = float(safe_eps)

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run Dual-Sign LMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k] (real).
        desired_signal:
            Desired signal d[k] (real).
        verbose:
            If True, prints runtime.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k]; errors: a priori error
            e[k] = d[k] - y[k]; coefficients: history kept by the base class;
            error_type: "a_priori".
        """
        start = perf_counter()

        u = np.asarray(input_signal, dtype=np.float64).ravel()
        ref = np.asarray(desired_signal, dtype=np.float64).ravel()
        n = int(u.size)
        n_taps = int(self.filter_order) + 1

        y_hist = np.zeros(n, dtype=np.float64)
        e_hist = np.zeros(n, dtype=np.float64)

        # Prewindowed delay line, newest sample first (zeros before k=0).
        regressor = np.zeros(n_taps, dtype=np.float64)

        for k in range(n):
            regressor = np.concatenate(([u[k]], regressor[:-1]))

            y_hist[k] = np.dot(self.w, regressor)
            err = float(ref[k] - y_hist[k])
            e_hist[k] = err

            # Dual-sign nonlinearity: plain sign for small errors,
            # gamma-scaled sign once |e| exceeds the threshold rho.
            gain = float(np.sign(err))
            if abs(err) > self.rho:
                gain *= self.gamma

            self.w = self.w + (2.0 * self.step_size) * gain * regressor
            self._record_history()

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=runtime_s,
            error_type="a_priori",
        )
DualSign LMS (real-valued).
This is a sign-error LMS variant that switches between two effective gains
depending on the error magnitude, controlled by a threshold rho.
Notes
- Real-valued only: enforced by `@ensure_real_signals`.
- Uses the unified base API via `@validate_input`:
  * optimize(input_signal=..., desired_signal=...)
  * optimize(x=..., d=...)
  * optimize(x, d)
Update rule (one common form)
e[k] = d[k] - y[k]
u[k] = sign(e[k]) if |e[k]| <= rho
= gamma * sign(e[k]) if |e[k]| > rho
w <- w + 2 * mu * u[k] * x_k
58 def __init__( 59 self, 60 filter_order: int, 61 rho: float, 62 gamma: float, 63 step: float = 1e-2, 64 w_init: Optional[ArrayLike] = None, 65 *, 66 safe_eps: float = 1e-12, 67 ) -> None: 68 """ 69 Parameters 70 ---------- 71 filter_order: 72 FIR order M (number of taps is M+1). 73 rho: 74 Threshold on |e[k]| that selects which sign gain is used. 75 gamma: 76 Gain multiplier used when |e[k]| > rho. Typically an integer > 1. 77 step: 78 Step-size (mu). 79 w_init: 80 Optional initial coefficients (length M+1). If None, zeros. 81 safe_eps: 82 Small epsilon for internal safety checks (kept for consistency across the library). 83 """ 84 super().__init__(filter_order=int(filter_order), w_init=w_init) 85 self.rho = float(rho) 86 self.gamma = float(gamma) 87 self.step_size = float(step) 88 self._safe_eps = float(safe_eps)
Parameters
filter_order: FIR order M (number of taps is M+1). rho: Threshold on |e[k]| that selects which sign gain is used. gamma: Gain multiplier used when |e[k]| > rho. Typically an integer > 1. step: Step-size (mu). w_init: Optional initial coefficients (length M+1). If None, zeros. safe_eps: Small epsilon for internal safety checks (kept for consistency across the library).
90 @validate_input 91 @ensure_real_signals 92 def optimize( 93 self, 94 input_signal: np.ndarray, 95 desired_signal: np.ndarray, 96 verbose: bool = False, 97 ) -> OptimizationResult: 98 """ 99 Run DualSign LMS adaptation. 100 101 Parameters 102 ---------- 103 input_signal: 104 Input signal x[k] (real). 105 desired_signal: 106 Desired signal d[k] (real). 107 verbose: 108 If True, prints runtime. 109 110 Returns 111 ------- 112 OptimizationResult 113 outputs: 114 Filter output y[k]. 115 errors: 116 A priori error e[k] = d[k] - y[k]. 117 coefficients: 118 Coefficient history stored in the base class. 119 error_type: 120 "a_priori". 121 """ 122 tic: float = perf_counter() 123 124 x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel() 125 d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel() 126 127 n_samples: int = int(x.size) 128 m: int = int(self.filter_order) 129 130 outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64) 131 errors: np.ndarray = np.zeros(n_samples, dtype=np.float64) 132 133 x_padded: np.ndarray = np.zeros(n_samples + m, dtype=np.float64) 134 x_padded[m:] = x 135 136 for k in range(n_samples): 137 x_k: np.ndarray = x_padded[k : k + m + 1][::-1] 138 139 y_k: float = float(np.dot(self.w, x_k)) 140 outputs[k] = y_k 141 142 e_k: float = float(d[k] - y_k) 143 errors[k] = e_k 144 145 s: float = float(np.sign(e_k)) 146 if abs(e_k) > self.rho: 147 s *= self.gamma 148 149 self.w = self.w + (2.0 * self.step_size) * s * x_k 150 self._record_history() 151 152 runtime_s: float = perf_counter() - tic 153 if verbose: 154 print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms") 155 156 return self._pack_results( 157 outputs=outputs, 158 errors=errors, 159 runtime_s=runtime_s, 160 error_type="a_priori", 161 )
Run DualSign LMS adaptation.
Parameters
input_signal: Input signal x[k] (real). desired_signal: Desired signal d[k] (real). verbose: If True, prints runtime.
Returns
OptimizationResult outputs: Filter output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: Coefficient history stored in the base class. error_type: "a_priori".
class LMSNewton(AdaptiveFilter):
    """
    LMS-Newton adaptive filter (complex-valued).

    Approximates a Newton step by keeping a recursive estimate of the inverse
    input correlation matrix, which tends to accelerate convergence when the
    input is correlated.

    Recursion (one common form)
    ---------------------------
    Let P[k] approximate R_x^{-1}. With forgetting factor alpha (0 < alpha < 1)
    and regressor x_k (shape (M+1,)):

        phi   = x_k^H P x_k
        denom = (1-alpha)/alpha + phi
        P    <- (P - (P x_k x_k^H P)/denom) / (1-alpha)
        w    <- w + mu * conj(e[k]) * (P x_k)

    where e[k] = d[k] - w^H x_k.

    Notes
    -----
    - Complex-valued implementation (supports_complex = True).
    - Uses the unified base API via `@validate_input`.
    """

    supports_complex: bool = True

    alpha: float
    step_size: float
    inv_rx: np.ndarray

    def __init__(
        self,
        filter_order: int,
        alpha: float,
        initial_inv_rx: np.ndarray,
        step: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        alpha:
            Forgetting factor (0 < alpha < 1).
        initial_inv_rx:
            Initial inverse correlation matrix P[0], shape (M+1, M+1).
        step:
            Step-size mu.
        w_init:
            Optional initial coefficients (length M+1). If None, zeros.
        safe_eps:
            Small epsilon used to guard denominators.

        Raises
        ------
        ValueError
            If alpha is outside (0, 1) or initial_inv_rx has the wrong shape.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.alpha = float(alpha)
        if not (0.0 < self.alpha < 1.0):
            raise ValueError(f"alpha must satisfy 0 < alpha < 1. Got alpha={self.alpha}.")

        p_init = np.asarray(initial_inv_rx, dtype=complex)
        n_taps = int(filter_order) + 1
        if p_init.shape != (n_taps, n_taps):
            raise ValueError(
                f"initial_inv_rx must have shape {(n_taps, n_taps)}. Got {p_init.shape}."
            )
        self.inv_rx = p_init

        self.step_size = float(step)
        self._safe_eps = float(safe_eps)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run LMS-Newton adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k]; errors: a priori error
            e[k] = d[k] - y[k]; coefficients: history kept by the base class;
            error_type: "a_priori".
        """
        start = perf_counter()

        u = np.asarray(input_signal, dtype=complex).ravel()
        ref = np.asarray(desired_signal, dtype=complex).ravel()
        n = int(u.size)
        n_taps = int(self.filter_order) + 1

        y_hist = np.zeros(n, dtype=complex)
        e_hist = np.zeros(n, dtype=complex)

        # Prewindowed delay line, newest sample first (zeros before k=0).
        regressor = np.zeros(n_taps, dtype=complex)
        one_minus_alpha = 1.0 - self.alpha

        for k in range(n):
            regressor = np.concatenate(([u[k]], regressor[:-1]))

            y_hist[k] = np.vdot(self.w, regressor)  # w^H x_k
            e_k = ref[k] - y_hist[k]
            e_hist[k] = e_k

            p_vec = self.inv_rx @ regressor           # P x_k
            phi = complex(np.vdot(regressor, p_vec))  # x_k^H P x_k

            denom = (one_minus_alpha / self.alpha) + phi
            # Guard against a vanishing denominator before the rank-one update.
            if abs(denom) < self._safe_eps:
                denom = denom + (self._safe_eps + 0.0j)

            # Rank-one inverse-correlation update (P x_k)(P x_k)^H / denom.
            self.inv_rx = (self.inv_rx - np.outer(p_vec, p_vec.conj()) / denom) / one_minus_alpha

            self.w = self.w + self.step_size * np.conj(e_k) * p_vec

            self._record_history()

        runtime_s = perf_counter() - start
        if verbose:
            print(f"[LMSNewton] Completed in {runtime_s * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=runtime_s,
            error_type="a_priori",
        )
LMS-Newton (complex-valued).
This algorithm approximates a Newton step by maintaining a recursive estimate of the inverse input correlation matrix, which tends to accelerate convergence in correlated-input scenarios.
Notes
- Complex-valued implementation (supports_complex = True).
- Uses the unified base API via `@validate_input`:
  * optimize(input_signal=..., desired_signal=...)
  * optimize(x=..., d=...)
  * optimize(x, d)
Recursion (one common form)
Let P[k] approximate R_x^{-1}. With forgetting factor alpha (0 < alpha < 1), and regressor x_k (shape (M+1,)), define:
phi = x_k^H P x_k
denom = (1-alpha)/alpha + phi
P <- (P - (P x_k x_k^H P)/denom) / (1-alpha)
w <- w + mu * conj(e[k]) * (P x_k)
where e[k] = d[k] - w^H x_k.
63 def __init__( 64 self, 65 filter_order: int, 66 alpha: float, 67 initial_inv_rx: np.ndarray, 68 step: float = 1e-2, 69 w_init: Optional[ArrayLike] = None, 70 *, 71 safe_eps: float = 1e-12, 72 ) -> None: 73 """ 74 Parameters 75 ---------- 76 filter_order: 77 FIR order M (number of taps is M+1). 78 alpha: 79 Forgetting factor (0 < alpha < 1). 80 initial_inv_rx: 81 Initial inverse correlation matrix P[0], shape (M+1, M+1). 82 step: 83 Step-size mu. 84 w_init: 85 Optional initial coefficients (length M+1). If None, zeros. 86 safe_eps: 87 Small epsilon used to guard denominators. 88 """ 89 super().__init__(filter_order=int(filter_order), w_init=w_init) 90 91 self.alpha = float(alpha) 92 if not (0.0 < self.alpha < 1.0): 93 raise ValueError(f"alpha must satisfy 0 < alpha < 1. Got alpha={self.alpha}.") 94 95 P0 = np.asarray(initial_inv_rx, dtype=complex) 96 n_taps = int(filter_order) + 1 97 if P0.shape != (n_taps, n_taps): 98 raise ValueError( 99 f"initial_inv_rx must have shape {(n_taps, n_taps)}. Got {P0.shape}." 100 ) 101 self.inv_rx = P0 102 103 self.step_size = float(step) 104 self._safe_eps = float(safe_eps)
Parameters
filter_order: FIR order M (number of taps is M+1). alpha: Forgetting factor (0 < alpha < 1). initial_inv_rx: Initial inverse correlation matrix P[0], shape (M+1, M+1). step: Step-size mu. w_init: Optional initial coefficients (length M+1). If None, zeros. safe_eps: Small epsilon used to guard denominators.
# NOTE(review): duplicated copy of LMSNewton.optimize — extraction artifact.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
) -> OptimizationResult:
    """
    Run LMS-Newton adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime.

    Returns
    -------
    OptimizationResult
        outputs:
            Filter output y[k].
        errors:
            A priori error e[k] = d[k] - y[k].
        coefficients:
            History of coefficients stored in the base class.
        error_type:
            "a_priori".
    """
    tic: float = perf_counter()

    x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
    d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

    n_samples: int = int(x.size)
    m: int = int(self.filter_order)

    outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
    errors: np.ndarray = np.zeros(n_samples, dtype=complex)

    # Zero-padded input so the first m regressors are prewindowed (zeros).
    x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex)
    x_padded[m:] = x

    for k in range(n_samples):
        # Regressor x_k = [x[k], x[k-1], ..., x[k-m]] via reversed slice.
        x_k: np.ndarray = x_padded[k : k + m + 1][::-1]

        # np.vdot conjugates its first argument: y = w^H x_k.
        y_k: complex = complex(np.vdot(self.w, x_k))
        outputs[k] = y_k

        e_k: complex = d[k] - y_k
        errors[k] = e_k

        x_col: np.ndarray = x_k.reshape(-1, 1)
        Px: np.ndarray = self.inv_rx @ x_col
        phi: complex = (x_col.conj().T @ Px).item()  # x_k^H P x_k

        denom: complex = ((1.0 - self.alpha) / self.alpha) + phi
        # Guard against a vanishing denominator before the rank-one update.
        if abs(denom) < self._safe_eps:
            denom = denom + (self._safe_eps + 0.0j)

        # Rank-one update of the inverse-correlation estimate P.
        self.inv_rx = (self.inv_rx - (Px @ Px.conj().T) / denom) / (1.0 - self.alpha)

        # Newton-like step along P x_k instead of raw x_k.
        self.w = self.w + self.step_size * np.conj(e_k) * Px.ravel()

        self._record_history()

    runtime_s: float = perf_counter() - tic
    if verbose:
        print(f"[LMSNewton] Completed in {runtime_s * 1000:.03f} ms")

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
    )
Run LMS-Newton adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime.
Returns
OptimizationResult outputs: Filter output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
class Power2ErrorLMS(AdaptiveFilter):
    """
    Power-of-Two Error LMS adaptive filter (real-valued).

    LMS variant where the instantaneous error is quantized to the nearest
    power of two (with special cases), aiming at reduced computational
    complexity.

    Quantization rule (as implemented here)
    ---------------------------------------
    Let e be the a priori error.

        q(e) = sign(e)                          if |e| >= 1
        q(e) = tau * sign(e)                    if |e| < 2^(-bd+1)
        q(e) = 2^{floor(log2(|e|))} * sign(e)   otherwise

    Update:
        w <- w + 2 * mu * q(e) * x_k

    Notes
    -----
    - Real-valued only: enforced by `ensure_real_signals`.
    - Uses the unified base API via `validate_input`.
    """

    supports_complex: bool = False

    def __init__(
        self,
        filter_order: int,
        bd: int,
        tau: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        bd:
            Word length (signal bits) used in the small-error threshold 2^(-bd+1).
        tau:
            Gain factor used when |e| is very small (< 2^(-bd+1)).
        step_size:
            Step-size (mu).
        w_init:
            Optional initial coefficients (length M+1). If None, zeros.

        Raises
        ------
        ValueError
            If bd is not a positive integer.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.bd = int(bd)
        self.tau = float(tau)
        self.step_size = float(step_size)

        if self.bd <= 0:
            raise ValueError(f"bd must be a positive integer. Got bd={self.bd}.")

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Power-of-Two Error LMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k] (real).
        desired_signal:
            Desired signal d[k] (real).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns the last quantized error value in result.extra.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k]; errors: a priori error
            e[k] = d[k] - y[k]; coefficients: history kept by the base class;
            error_type: "a_priori".
        """
        start = perf_counter()

        u = np.asarray(input_signal, dtype=np.float64).ravel()
        ref = np.asarray(desired_signal, dtype=np.float64).ravel()
        n = int(u.size)
        n_taps = int(self.filter_order) + 1

        y_hist = np.zeros(n, dtype=np.float64)
        e_hist = np.zeros(n, dtype=np.float64)

        # Prewindowed delay line, newest sample first (zeros before k=0).
        regressor = np.zeros(n_taps, dtype=np.float64)

        small_thr = 2.0 ** (-self.bd + 1)
        last_qe: Optional[float] = None

        def quantize(err: float) -> float:
            """Power-of-two quantizer applied to the a priori error."""
            mag = abs(err)
            if mag >= 1.0:
                return float(np.sign(err))
            if mag < small_thr:
                return float(self.tau * np.sign(err))
            return float((2.0 ** np.floor(np.log2(mag))) * np.sign(err))

        for k in range(n):
            regressor = np.concatenate(([u[k]], regressor[:-1]))

            y_hist[k] = np.dot(self.w, regressor)
            err = float(ref[k] - y_hist[k])
            e_hist[k] = err

            last_qe = quantize(err)

            self.w = self.w + (2.0 * self.step_size) * last_qe * regressor
            self._record_history()

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[Power2ErrorLMS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {"last_quantized_error": last_qe, "small_threshold": float(small_thr)}

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Power-of-Two Error LMS (real-valued).
This is an LMS variant where the instantaneous error is quantized to the nearest power-of-two (or special cases), aiming at reducing computational complexity.
Quantization rule (as implemented here)
Let e be the a priori error.
- If |e| >= 1: q(e) = sign(e)
- Else if |e| < 2^(-bd+1): q(e) = tau * sign(e)
- Else: q(e) = 2^{floor(log2(|e|))} * sign(e)
Update: w <- w + 2 * mu * q(e) * x_k
Notes
- Real-valued only: enforced by `ensure_real_signals`.
- Uses the unified base API via `validate_input`.
58 def __init__( 59 self, 60 filter_order: int, 61 bd: int, 62 tau: float, 63 step_size: float = 1e-2, 64 w_init: Optional[ArrayLike] = None, 65 ) -> None: 66 """ 67 Parameters 68 ---------- 69 filter_order: 70 FIR order M (number of taps is M+1). 71 bd: 72 Word length (signal bits) used in the small-error threshold 2^(-bd+1). 73 tau: 74 Gain factor used when |e| is very small (< 2^(-bd+1)). 75 step_size: 76 Step-size (mu). 77 w_init: 78 Optional initial coefficients (length M+1). If None, zeros. 79 """ 80 super().__init__(filter_order=int(filter_order), w_init=w_init) 81 self.bd = int(bd) 82 self.tau = float(tau) 83 self.step_size = float(step_size) 84 85 if self.bd <= 0: 86 raise ValueError(f"bd must be a positive integer. Got bd={self.bd}.")
Parameters
filter_order: FIR order M (number of taps is M+1). bd: Word length (signal bits) used in the small-error threshold 2^(-bd+1). tau: Gain factor used when |e| is very small (< 2^(-bd+1)). step_size: Step-size (mu). w_init: Optional initial coefficients (length M+1). If None, zeros.
# NOTE(review): duplicated copy of Power2ErrorLMS.optimize — extraction artifact.
@validate_input
@ensure_real_signals
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run Power-of-Two Error LMS adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k] (real).
    desired_signal:
        Desired signal d[k] (real).
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns the last quantized error value in result.extra.

    Returns
    -------
    OptimizationResult
        outputs:
            Filter output outputs[k].
        errors:
            A priori error errors[k] = d[k] - outputs[k].
        coefficients:
            History of coefficients stored in the base class.
        error_type:
            "a_priori".
    """
    t0 = perf_counter()

    x = np.asarray(input_signal, dtype=np.float64).ravel()
    d = np.asarray(desired_signal, dtype=np.float64).ravel()

    n_samples = int(x.size)
    m = int(self.filter_order)

    outputs = np.zeros(n_samples, dtype=np.float64)
    errors = np.zeros(n_samples, dtype=np.float64)

    # Zero-padded input so the first m regressors are prewindowed (zeros).
    x_padded = np.zeros(n_samples + m, dtype=np.float64)
    x_padded[m:] = x

    last_qe: Optional[float] = None
    # Small-error threshold 2^(-bd+1); bd validated positive in __init__.
    small_thr = 2.0 ** (-self.bd + 1)

    for k in range(n_samples):
        # Regressor x_k = [x[k], x[k-1], ..., x[k-m]] via reversed slice.
        x_k = x_padded[k : k + m + 1][::-1]

        y_k = float(np.dot(self.w, x_k))
        outputs[k] = y_k

        e_k = float(d[k] - y_k)
        errors[k] = e_k

        # Power-of-two quantization of the error:
        # clip to sign for |e| >= 1, tau*sign for tiny |e|, else nearest
        # lower power of two via floor(log2(|e|)).
        abs_error = abs(e_k)
        if abs_error >= 1.0:
            qe = float(np.sign(e_k))
        elif abs_error < small_thr:
            qe = float(self.tau * np.sign(e_k))
        else:
            qe = float((2.0 ** np.floor(np.log2(abs_error))) * np.sign(e_k))

        last_qe = qe

        self.w = self.w + (2.0 * self.step_size) * qe * x_k
        self._record_history()

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[Power2ErrorLMS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {"last_quantized_error": last_qe, "small_threshold": float(small_thr)}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Run Power-of-Two Error LMS adaptation.
Parameters
input_signal: Input signal x[k] (real). desired_signal: Desired signal d[k] (real). verbose: If True, prints runtime. return_internal_states: If True, returns the last quantized error value in result.extra.
Returns
OptimizationResult outputs: Filter output outputs[k]. errors: A priori error errors[k] = d[k] - outputs[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
class TDomainLMS(AdaptiveFilter):
    """
    Generic Transform-Domain LMS using a user-provided unitary transform matrix T.

    This is a transform-domain LMS variant (Algorithm 4.4 - Diniz). Given a transform
    z_k = T x_k and transform-domain weights w_T, the recursion is:

        y[k]   = w_T^H z_k
        e[k]   = d[k] - y[k]
        P_z[k] = alpha * |z_k|^2 + (1-alpha) * P_z[k-1]
        w_T   <- w_T + mu * conj(e[k]) * z_k / (gamma + P_z[k])

    For library consistency, this implementation also exposes time-domain weights:

        w_time = T^H w_T

    Notes
    -----
    - Complex-valued implementation (`supports_complex=True`).
    - `OptimizationResult.coefficients` stores time-domain coefficient history (self.w_history).
    - Transform-domain coefficient history is returned in `result.extra["coefficients_transform"]`
      when requested.
    - `transform_matrix` is expected to be unitary (T^H T = I). If it is not, the mapping
      back to time domain is not the true inverse transform.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        gamma: float,
        alpha: float,
        initial_power: float,
        transform_matrix: np.ndarray,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        assume_unitary: bool = True,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1). Transform size must be (M+1, M+1).
        gamma:
            Small positive constant to avoid division by (near) zero in each bin.
        alpha:
            Smoothing factor for power estimation (typically close to 1).
        initial_power:
            Initial power estimate for all transform bins.
        transform_matrix:
            Transform matrix T of shape (M+1, M+1). Typically unitary.
        step_size:
            Step-size (mu).
        w_init:
            Optional initial coefficients in time domain (length M+1). If None, zeros.
        assume_unitary:
            If True, uses w_time = T^H w_T. If False, uses a least-squares mapping
            w_time = pinv(T)^H w_T (slower, but works for non-unitary transforms).
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.gamma = float(gamma)
        self.alpha = float(alpha)
        self.step_size = float(step_size)

        # Number of taps, which is also the transform size.
        self.N = int(self.filter_order + 1)

        T = np.asarray(transform_matrix, dtype=complex)
        if T.shape != (self.N, self.N):
            raise ValueError(f"transform_matrix must have shape {(self.N, self.N)}. Got {T.shape}.")

        self.T = T
        self._assume_unitary = bool(assume_unitary)

        # transform-domain weights (start from time-domain w)
        self.w_T = self.T @ np.asarray(self.w, dtype=complex)

        # power estimate per transform bin
        self.power_vector = np.full(self.N, float(initial_power), dtype=float)

        # optional transform-domain history
        self._w_history_T: List[np.ndarray] = [self.w_T.copy()]

    def _to_time_domain(self, w_T: np.ndarray) -> np.ndarray:
        """Map transform-domain weights to time-domain weights."""
        if self._assume_unitary:
            # For a unitary T, T^H is the exact inverse transform.
            return self.T.conj().T @ w_T
        # fallback for non-unitary transforms (more expensive)
        T_pinv = np.linalg.pinv(self.T)
        return T_pinv.conj().T @ w_T

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Transform-Domain LMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns transform-domain coefficient history and final power vector in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] (a priori).
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                Time-domain coefficient history stored in the base class.
            error_type:
                "a_priori".
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        m = int(self.filter_order)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # Zero padding emulates x[k] = 0 for k < 0 (cold start).
        x_padded = np.zeros(n_samples + m, dtype=complex)
        x_padded[m:] = x

        w_hist_T: List[np.ndarray] = [self.w_T.copy()]

        for k in range(n_samples):
            # Newest-first regressor: [x[k], x[k-1], ..., x[k-M]].
            x_k = x_padded[k : k + m + 1][::-1]
            z_k = self.T @ x_k

            # Exponentially-smoothed per-bin power estimate.
            self.power_vector = (
                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
            )

            # np.vdot conjugates its first argument, so this computes w_T^H z_k.
            y_k = complex(np.vdot(self.w_T, z_k))
            outputs[k] = y_k

            e_k = d[k] - y_k
            errors[k] = e_k

            # Power-normalized update: each bin is scaled by its own power estimate.
            denom = self.gamma + self.power_vector
            self.w_T = self.w_T + self.step_size * np.conj(e_k) * (z_k / denom)

            # Keep the time-domain view in sync for the base-class history.
            self.w = self._to_time_domain(self.w_T)

            self._record_history()
            w_hist_T.append(self.w_T.copy())

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[TDomainLMS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "coefficients_transform": np.asarray(w_hist_T),
                "power_vector_last": self.power_vector.copy(),
                "transform_matrix": self.T.copy(),
                "assume_unitary": self._assume_unitary,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Generic Transform-Domain LMS using a user-provided unitary transform matrix T.
This is a transform-domain LMS variant (Algorithm 4.4 - Diniz). Given a transform z_k = T x_k and transform-domain weights w_T, the recursion is:
y[k] = w_T^H z_k
e[k] = d[k] - y[k]
P_z[k] = alpha * |z_k|^2 + (1-alpha) * P_z[k-1]
w_T <- w_T + mu * conj(e[k]) * z_k / (gamma + P_z[k])
For library consistency, this implementation also exposes time-domain weights:
w_time = T^H w_T
Notes
- Complex-valued implementation (`supports_complex=True`).
- `OptimizationResult.coefficients` stores time-domain coefficient history (`self.w_history`).
- Transform-domain coefficient history is returned in `result.extra["coefficients_transform"]` when requested.
- `transform_matrix` is expected to be unitary (T^H T = I). If it is not, the mapping back to time domain is not the true inverse transform.
56 def __init__( 57 self, 58 filter_order: int, 59 gamma: float, 60 alpha: float, 61 initial_power: float, 62 transform_matrix: np.ndarray, 63 step_size: float = 1e-2, 64 w_init: Optional[ArrayLike] = None, 65 *, 66 assume_unitary: bool = True, 67 ) -> None: 68 """ 69 Parameters 70 ---------- 71 filter_order: 72 FIR order M (number of taps is M+1). Transform size must be (M+1, M+1). 73 gamma: 74 Small positive constant to avoid division by (near) zero in each bin. 75 alpha: 76 Smoothing factor for power estimation (typically close to 1). 77 initial_power: 78 Initial power estimate for all transform bins. 79 transform_matrix: 80 Transform matrix T of shape (M+1, M+1). Typically unitary. 81 step_size: 82 Step-size (mu). 83 w_init: 84 Optional initial coefficients in time domain (length M+1). If None, zeros. 85 assume_unitary: 86 If True, uses w_time = T^H w_T. If False, uses a least-squares mapping 87 w_time = pinv(T)^H w_T (slower, but works for non-unitary transforms). 88 """ 89 super().__init__(filter_order=int(filter_order), w_init=w_init) 90 91 self.gamma = float(gamma) 92 self.alpha = float(alpha) 93 self.step_size = float(step_size) 94 95 self.N = int(self.filter_order + 1) 96 97 T = np.asarray(transform_matrix, dtype=complex) 98 if T.shape != (self.N, self.N): 99 raise ValueError(f"transform_matrix must have shape {(self.N, self.N)}. Got {T.shape}.") 100 101 self.T = T 102 self._assume_unitary = bool(assume_unitary) 103 104 # transform-domain weights (start from time-domain w) 105 self.w_T = self.T @ np.asarray(self.w, dtype=complex) 106 107 # power estimate per transform bin 108 self.power_vector = np.full(self.N, float(initial_power), dtype=float) 109 110 # optional transform-domain history 111 self._w_history_T: List[np.ndarray] = [self.w_T.copy()]
Parameters
filter_order: FIR order M (number of taps is M+1). Transform size must be (M+1, M+1). gamma: Small positive constant to avoid division by (near) zero in each bin. alpha: Smoothing factor for power estimation (typically close to 1). initial_power: Initial power estimate for all transform bins. transform_matrix: Transform matrix T of shape (M+1, M+1). Typically unitary. step_size: Step-size (mu). w_init: Optional initial coefficients in time domain (length M+1). If None, zeros. assume_unitary: If True, uses w_time = T^H w_T. If False, uses a least-squares mapping w_time = pinv(T)^H w_T (slower, but works for non-unitary transforms).
121 @validate_input 122 def optimize( 123 self, 124 input_signal: np.ndarray, 125 desired_signal: np.ndarray, 126 verbose: bool = False, 127 return_internal_states: bool = False, 128 ) -> OptimizationResult: 129 """ 130 Run Transform-Domain LMS adaptation. 131 132 Parameters 133 ---------- 134 input_signal: 135 Input signal x[k]. 136 desired_signal: 137 Desired signal d[k]. 138 verbose: 139 If True, prints runtime. 140 return_internal_states: 141 If True, returns transform-domain coefficient history and final power vector in result.extra. 142 143 Returns 144 ------- 145 OptimizationResult 146 outputs: 147 Filter output y[k] (a priori). 148 errors: 149 A priori error e[k] = d[k] - y[k]. 150 coefficients: 151 Time-domain coefficient history stored in the base class. 152 error_type: 153 "a_priori". 154 """ 155 t0 = perf_counter() 156 157 x = np.asarray(input_signal, dtype=complex).ravel() 158 d = np.asarray(desired_signal, dtype=complex).ravel() 159 160 n_samples = int(d.size) 161 m = int(self.filter_order) 162 163 outputs = np.zeros(n_samples, dtype=complex) 164 errors = np.zeros(n_samples, dtype=complex) 165 166 x_padded = np.zeros(n_samples + m, dtype=complex) 167 x_padded[m:] = x 168 169 w_hist_T: List[np.ndarray] = [self.w_T.copy()] 170 171 for k in range(n_samples): 172 x_k = x_padded[k : k + m + 1][::-1] 173 z_k = self.T @ x_k 174 175 self.power_vector = ( 176 self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector 177 ) 178 179 y_k = complex(np.vdot(self.w_T, z_k)) 180 outputs[k] = y_k 181 182 e_k = d[k] - y_k 183 errors[k] = e_k 184 185 denom = self.gamma + self.power_vector 186 self.w_T = self.w_T + self.step_size * np.conj(e_k) * (z_k / denom) 187 188 self.w = self._to_time_domain(self.w_T) 189 190 self._record_history() 191 w_hist_T.append(self.w_T.copy()) 192 193 runtime_s = float(perf_counter() - t0) 194 if verbose: 195 print(f"[TDomainLMS] Completed in {runtime_s * 1000:.03f} ms") 196 197 extra: Optional[Dict[str, Any]] = None 
198 if return_internal_states: 199 extra = { 200 "coefficients_transform": np.asarray(w_hist_T), 201 "power_vector_last": self.power_vector.copy(), 202 "transform_matrix": self.T.copy(), 203 "assume_unitary": self._assume_unitary, 204 } 205 206 return self._pack_results( 207 outputs=outputs, 208 errors=errors, 209 runtime_s=runtime_s, 210 error_type="a_priori", 211 extra=extra, 212 )
Run Transform-Domain LMS adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns transform-domain coefficient history and final power vector in result.extra.
Returns
OptimizationResult outputs: Filter output y[k] (a priori). errors: A priori error e[k] = d[k] - y[k]. coefficients: Time-domain coefficient history stored in the base class. error_type: "a_priori".
class TDomainDCT(AdaptiveFilter):
    """
    Transform-Domain LMS using a DCT matrix (complex-valued).

    Implements the Transform-Domain LMS recursion (Algorithm 4.4 - Diniz),
    where the regressor is transformed via an orthonormal DCT:

        z_k    = T x_k
        y[k]   = w_z^H z_k
        e[k]   = d[k] - y[k]
        P_z[k] = alpha * |z_k|^2 + (1-alpha) * P_z[k-1]
        w_z   <- w_z + mu * conj(e[k]) * z_k / (gamma + P_z[k])

    Then the time-domain coefficients are recovered by:

        w = T^T w_z   (since T is orthonormal/real)

    Library conventions
    -------------------
    - Complex-valued implementation (`supports_complex=True`).
    - `OptimizationResult.coefficients` stores time-domain coefficient history (self.w_history).
    - Transform-domain coefficient history is provided in `result.extra["coefficients_dct"]`
      when requested.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        gamma: float,
        alpha: float,
        initial_power: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        gamma:
            Regularization factor to avoid division by (near) zero in each bin.
        alpha:
            Smoothing factor for power estimation (typically close to 1).
        initial_power:
            Initial power estimate used for all transform bins.
        step_size:
            Step-size (mu).
        w_init:
            Optional initial coefficients in time domain (length M+1). If None, zeros.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.gamma = float(gamma)
        self.alpha = float(alpha)
        self.step_size = float(step_size)

        # Number of taps, which is also the DCT size.
        self.N = int(self.filter_order + 1)

        # Orthonormal DCT matrix built by transforming the identity columns.
        self.T = dct(np.eye(self.N), norm="ortho", axis=0)

        # Transform-domain weights initialized from the time-domain ones.
        self.w_dct = self.T @ np.asarray(self.w, dtype=complex)

        # Power estimate per transform bin.
        self.power_vector = np.full(self.N, float(initial_power), dtype=float)

        # Optional transform-domain history.
        self._w_history_dct: List[np.ndarray] = [self.w_dct.copy()]

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Transform-Domain LMS (DCT) adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns extra sequences such as DCT coefficients history and final power vector.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] (a priori).
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                Time-domain coefficient history stored in the base class.
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["coefficients_dct"]:
            List/array of transform-domain coefficient vectors over time.
        extra["power_vector_last"]:
            Final transform-bin power estimate.
        extra["dct_matrix"]:
            The DCT matrix T used (shape (M+1, M+1)).
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        m = int(self.filter_order)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # Zero padding emulates x[k] = 0 for k < 0 (cold start).
        x_padded = np.zeros(n_samples + m, dtype=complex)
        x_padded[m:] = x

        w_hist_dct: List[np.ndarray] = [self.w_dct.copy()]

        for k in range(n_samples):
            # Newest-first regressor: [x[k], x[k-1], ..., x[k-M]].
            x_k = x_padded[k : k + m + 1][::-1]
            z_k = self.T @ x_k

            # Exponentially-smoothed per-bin power estimate.
            self.power_vector = (
                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
            )

            # np.vdot conjugates its first argument, so this computes w_z^H z_k.
            y_k = complex(np.vdot(self.w_dct, z_k))
            outputs[k] = y_k

            e_k = d[k] - y_k
            errors[k] = e_k

            # Power-normalized update: each bin is scaled by its own power estimate.
            denom = self.gamma + self.power_vector
            self.w_dct = self.w_dct + self.step_size * np.conj(e_k) * (z_k / denom)

            # T is real/orthonormal, so T^T maps back to the time domain.
            self.w = self.T.T @ self.w_dct

            self._record_history()
            w_hist_dct.append(self.w_dct.copy())

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[TDomainDCT] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "coefficients_dct": np.asarray(w_hist_dct),
                "power_vector_last": self.power_vector.copy(),
                "dct_matrix": self.T.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Transform-Domain LMS using a DCT matrix (complex-valued).
Implements the Transform-Domain LMS recursion (Algorithm 4.4 - Diniz), where the regressor is transformed via an orthonormal DCT:
z_k = T x_k
y[k] = w_z^H z_k
e[k] = d[k] - y[k]
P_z[k] = alpha * |z_k|^2 + (1-alpha) * P_z[k-1]
w_z <- w_z + mu * conj(e[k]) * z_k / (gamma + P_z[k])
Then the time-domain coefficients are recovered by: w = T^T w_z (since T is orthonormal/real)
Library conventions
- Complex-valued implementation (`supports_complex=True`).
- `OptimizationResult.coefficients` stores time-domain coefficient history (`self.w_history`).
- Transform-domain coefficient history is provided in `result.extra["coefficients_dct"]` when requested.
56 def __init__( 57 self, 58 filter_order: int, 59 gamma: float, 60 alpha: float, 61 initial_power: float, 62 step_size: float = 1e-2, 63 w_init: Optional[ArrayLike] = None, 64 ) -> None: 65 """ 66 Parameters 67 ---------- 68 filter_order: 69 FIR order M (number of taps is M+1). 70 gamma: 71 Regularization factor to avoid division by (near) zero in each bin. 72 alpha: 73 Smoothing factor for power estimation (typically close to 1). 74 initial_power: 75 Initial power estimate used for all transform bins. 76 step_size: 77 Step-size (mu). 78 w_init: 79 Optional initial coefficients in time domain (length M+1). If None, zeros. 80 """ 81 super().__init__(filter_order=int(filter_order), w_init=w_init) 82 83 self.gamma = float(gamma) 84 self.alpha = float(alpha) 85 self.step_size = float(step_size) 86 87 self.N = int(self.filter_order + 1) 88 89 self.T = dct(np.eye(self.N), norm="ortho", axis=0) 90 91 self.w_dct = self.T @ np.asarray(self.w, dtype=complex) 92 93 self.power_vector = np.full(self.N, float(initial_power), dtype=float) 94 95 self._w_history_dct: List[np.ndarray] = [self.w_dct.copy()]
Parameters
filter_order: FIR order M (number of taps is M+1). gamma: Regularization factor to avoid division by (near) zero in each bin. alpha: Smoothing factor for power estimation (typically close to 1). initial_power: Initial power estimate used for all transform bins. step_size: Step-size (mu). w_init: Optional initial coefficients in time domain (length M+1). If None, zeros.
97 @validate_input 98 def optimize( 99 self, 100 input_signal: np.ndarray, 101 desired_signal: np.ndarray, 102 verbose: bool = False, 103 return_internal_states: bool = False, 104 ) -> OptimizationResult: 105 """ 106 Run Transform-Domain LMS (DCT) adaptation. 107 108 Parameters 109 ---------- 110 input_signal: 111 Input signal x[k]. 112 desired_signal: 113 Desired signal d[k]. 114 verbose: 115 If True, prints runtime. 116 return_internal_states: 117 If True, returns extra sequences such as DCT coefficients history and final power vector. 118 119 Returns 120 ------- 121 OptimizationResult 122 outputs: 123 Filter output y[k] (a priori). 124 errors: 125 A priori error e[k] = d[k] - y[k]. 126 coefficients: 127 Time-domain coefficient history stored in the base class. 128 error_type: 129 "a_priori". 130 131 Extra (when return_internal_states=True) 132 -------------------------------------- 133 extra["coefficients_dct"]: 134 List/array of transform-domain coefficient vectors over time. 135 extra["power_vector_last"]: 136 Final transform-bin power estimate. 137 extra["dct_matrix"]: 138 The DCT matrix T used (shape (M+1, M+1)). 
139 """ 140 t0 = perf_counter() 141 142 x = np.asarray(input_signal, dtype=complex).ravel() 143 d = np.asarray(desired_signal, dtype=complex).ravel() 144 145 n_samples = int(d.size) 146 m = int(self.filter_order) 147 148 outputs = np.zeros(n_samples, dtype=complex) 149 errors = np.zeros(n_samples, dtype=complex) 150 151 x_padded = np.zeros(n_samples + m, dtype=complex) 152 x_padded[m:] = x 153 154 w_hist_dct: List[np.ndarray] = [self.w_dct.copy()] 155 156 for k in range(n_samples): 157 x_k = x_padded[k : k + m + 1][::-1] 158 z_k = self.T @ x_k 159 160 self.power_vector = ( 161 self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector 162 ) 163 164 y_k = complex(np.vdot(self.w_dct, z_k)) 165 outputs[k] = y_k 166 167 e_k = d[k] - y_k 168 errors[k] = e_k 169 170 denom = self.gamma + self.power_vector 171 self.w_dct = self.w_dct + self.step_size * np.conj(e_k) * (z_k / denom) 172 173 self.w = self.T.T @ self.w_dct 174 175 self._record_history() 176 w_hist_dct.append(self.w_dct.copy()) 177 178 runtime_s = float(perf_counter() - t0) 179 if verbose: 180 print(f"[TDomainDCT] Completed in {runtime_s * 1000:.03f} ms") 181 182 extra: Optional[Dict[str, Any]] = None 183 if return_internal_states: 184 extra = { 185 "coefficients_dct": np.asarray(w_hist_dct), 186 "power_vector_last": self.power_vector.copy(), 187 "dct_matrix": self.T.copy(), 188 } 189 190 return self._pack_results( 191 outputs=outputs, 192 errors=errors, 193 runtime_s=runtime_s, 194 error_type="a_priori", 195 extra=extra, 196 )
Run Transform-Domain LMS (DCT) adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns extra sequences such as DCT coefficients history and final power vector.
Returns
OptimizationResult outputs: Filter output y[k] (a priori). errors: A priori error e[k] = d[k] - y[k]. coefficients: Time-domain coefficient history stored in the base class. error_type: "a_priori".
Extra (when return_internal_states=True)
extra["coefficients_dct"]: List/array of transform-domain coefficient vectors over time. extra["power_vector_last"]: Final transform-bin power estimate. extra["dct_matrix"]: The DCT matrix T used (shape (M+1, M+1)).
class TDomainDFT(AdaptiveFilter):
    """
    Transform-Domain LMS using a DFT (complex-valued).

    Implements a transform-domain LMS variant (Algorithm 4.4 - Diniz) where the
    regressor is transformed via a unitary DFT:

        z_k    = FFT(x_k) / sqrt(N)
        y[k]   = w_z^H z_k
        e[k]   = d[k] - y[k]
        P_z[k] = alpha * |z_k|^2 + (1-alpha) * P_z[k-1]
        w_z   <- w_z + mu * conj(e[k]) * z_k / (gamma + P_z[k])

    Time-domain coefficients are recovered by:

        w = IFFT(w_z) * sqrt(N)

    Library conventions
    -------------------
    - Complex-valued implementation (`supports_complex=True`).
    - `OptimizationResult.coefficients` stores time-domain coefficient history (self.w_history).
    - Transform-domain coefficient history is provided in `result.extra["coefficients_dft"]`
      when requested.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        gamma: float,
        alpha: float,
        initial_power: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1). The DFT size is N = M+1.
        gamma:
            Small positive constant to avoid division by (near) zero in each bin.
        alpha:
            Smoothing factor for power estimation (typically close to 1).
        initial_power:
            Initial power estimate for all bins.
        step_size:
            Step-size (mu).
        w_init:
            Optional initial coefficients in time domain (length M+1). If None, zeros.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.gamma = float(gamma)
        self.alpha = float(alpha)
        self.step_size = float(step_size)

        # DFT size and the 1/sqrt(N) factor that makes the transform unitary.
        self.N = int(self.filter_order + 1)
        self._sqrtN = float(np.sqrt(self.N))

        # Transform-domain weights initialized from the time-domain ones.
        self.w_dft = fft(np.asarray(self.w, dtype=complex)) / self._sqrtN

        # Power estimate per transform bin.
        self.power_vector = np.full(self.N, float(initial_power), dtype=float)

        # Optional transform-domain history.
        self._w_history_dft: List[np.ndarray] = [self.w_dft.copy()]

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Transform-Domain LMS (DFT) adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns extra sequences such as DFT coefficients history and final power vector.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] (a priori).
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                Time-domain coefficient history stored in the base class.
            error_type:
                "a_priori".
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        m = int(self.filter_order)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # Zero padding emulates x[k] = 0 for k < 0 (cold start).
        x_padded = np.zeros(n_samples + m, dtype=complex)
        x_padded[m:] = x

        w_hist_dft: List[np.ndarray] = [self.w_dft.copy()]

        for k in range(n_samples):
            # Newest-first regressor and its unitary DFT.
            x_k = x_padded[k : k + m + 1][::-1]
            z_k = fft(x_k) / self._sqrtN

            # Exponentially-smoothed per-bin power estimate.
            self.power_vector = (
                self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector
            )

            # np.vdot conjugates its first argument, so this computes w_z^H z_k.
            y_k = complex(np.vdot(self.w_dft, z_k))
            outputs[k] = y_k

            e_k = d[k] - y_k
            errors[k] = e_k

            # Power-normalized update: each bin is scaled by its own power estimate.
            denom = self.gamma + self.power_vector
            self.w_dft = self.w_dft + self.step_size * np.conj(e_k) * (z_k / denom)

            # Unitary inverse DFT restores the time-domain coefficients.
            self.w = ifft(self.w_dft) * self._sqrtN

            self._record_history()
            w_hist_dft.append(self.w_dft.copy())

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[TDomainDFT] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "coefficients_dft": np.asarray(w_hist_dft),
                "power_vector_last": self.power_vector.copy(),
                "sqrtN": self._sqrtN,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Transform-Domain LMS using a DFT (complex-valued).
Implements a transform-domain LMS variant (Algorithm 4.4 - Diniz) where the regressor is transformed via a unitary DFT:
z_k = FFT(x_k) / sqrt(N)
y[k] = w_z^H z_k
e[k] = d[k] - y[k]
P_z[k] = alpha * |z_k|^2 + (1-alpha) * P_z[k-1]
w_z <- w_z + mu * conj(e[k]) * z_k / (gamma + P_z[k])
Time-domain coefficients are recovered by: w = IFFT(w_z) * sqrt(N)
Library conventions
- Complex-valued implementation (`supports_complex=True`).
- `OptimizationResult.coefficients` stores time-domain coefficient history (`self.w_history`).
- Transform-domain coefficient history is provided in `result.extra["coefficients_dft"]` when requested.
56 def __init__( 57 self, 58 filter_order: int, 59 gamma: float, 60 alpha: float, 61 initial_power: float, 62 step_size: float = 1e-2, 63 w_init: Optional[ArrayLike] = None, 64 ) -> None: 65 """ 66 Parameters 67 ---------- 68 filter_order: 69 FIR order M (number of taps is M+1). The DFT size is N = M+1. 70 gamma: 71 Small positive constant to avoid division by (near) zero in each bin. 72 alpha: 73 Smoothing factor for power estimation (typically close to 1). 74 initial_power: 75 Initial power estimate for all bins. 76 step_size: 77 Step-size (mu). 78 w_init: 79 Optional initial coefficients in time domain (length M+1). If None, zeros. 80 """ 81 super().__init__(filter_order=int(filter_order), w_init=w_init) 82 83 self.gamma = float(gamma) 84 self.alpha = float(alpha) 85 self.step_size = float(step_size) 86 87 self.N = int(self.filter_order + 1) 88 self._sqrtN = float(np.sqrt(self.N)) 89 90 self.w_dft = fft(np.asarray(self.w, dtype=complex)) / self._sqrtN 91 92 self.power_vector = np.full(self.N, float(initial_power), dtype=float) 93 94 self._w_history_dft: List[np.ndarray] = [self.w_dft.copy()]
Parameters
filter_order: FIR order M (number of taps is M+1). The DFT size is N = M+1. gamma: Small positive constant to avoid division by (near) zero in each bin. alpha: Smoothing factor for power estimation (typically close to 1). initial_power: Initial power estimate for all bins. step_size: Step-size (mu). w_init: Optional initial coefficients in time domain (length M+1). If None, zeros.
96 @validate_input 97 def optimize( 98 self, 99 input_signal: np.ndarray, 100 desired_signal: np.ndarray, 101 verbose: bool = False, 102 return_internal_states: bool = False, 103 ) -> OptimizationResult: 104 """ 105 Run Transform-Domain LMS (DFT) adaptation. 106 107 Parameters 108 ---------- 109 input_signal: 110 Input signal x[k]. 111 desired_signal: 112 Desired signal d[k]. 113 verbose: 114 If True, prints runtime. 115 return_internal_states: 116 If True, returns extra sequences such as DFT coefficients history and final power vector. 117 118 Returns 119 ------- 120 OptimizationResult 121 outputs: 122 Filter output y[k] (a priori). 123 errors: 124 A priori error e[k] = d[k] - y[k]. 125 coefficients: 126 Time-domain coefficient history stored in the base class. 127 error_type: 128 "a_priori". 129 """ 130 t0 = perf_counter() 131 132 x = np.asarray(input_signal, dtype=complex).ravel() 133 d = np.asarray(desired_signal, dtype=complex).ravel() 134 135 n_samples = int(d.size) 136 m = int(self.filter_order) 137 138 outputs = np.zeros(n_samples, dtype=complex) 139 errors = np.zeros(n_samples, dtype=complex) 140 141 x_padded = np.zeros(n_samples + m, dtype=complex) 142 x_padded[m:] = x 143 144 w_hist_dft: List[np.ndarray] = [self.w_dft.copy()] 145 146 for k in range(n_samples): 147 x_k = x_padded[k : k + m + 1][::-1] 148 z_k = fft(x_k) / self._sqrtN 149 150 self.power_vector = ( 151 self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector 152 ) 153 154 y_k = complex(np.vdot(self.w_dft, z_k)) 155 outputs[k] = y_k 156 157 e_k = d[k] - y_k 158 errors[k] = e_k 159 160 denom = self.gamma + self.power_vector 161 self.w_dft = self.w_dft + self.step_size * np.conj(e_k) * (z_k / denom) 162 163 self.w = ifft(self.w_dft) * self._sqrtN 164 165 self._record_history() 166 w_hist_dft.append(self.w_dft.copy()) 167 168 runtime_s = float(perf_counter() - t0) 169 if verbose: 170 print(f"[TDomainDFT] Completed in {runtime_s * 1000:.03f} ms") 171 172 extra: 
Optional[Dict[str, Any]] = None 173 if return_internal_states: 174 extra = { 175 "coefficients_dft": np.asarray(w_hist_dft), 176 "power_vector_last": self.power_vector.copy(), 177 "sqrtN": self._sqrtN, 178 } 179 180 return self._pack_results( 181 outputs=outputs, 182 errors=errors, 183 runtime_s=runtime_s, 184 error_type="a_priori", 185 extra=extra, 186 )
Run Transform-Domain LMS (DFT) adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns extra sequences such as DFT coefficients history and final power vector.
Returns
OptimizationResult outputs: Filter output y[k] (a priori). errors: A priori error e[k] = d[k] - y[k]. coefficients: Time-domain coefficient history stored in the base class. error_type: "a_priori".
class RLS(AdaptiveFilter):
    """
    Recursive Least Squares (RLS) adaptive FIR filter for complex-valued data.

    Implements Algorithm 5.3 (Diniz). The filter minimizes an exponentially
    weighted least-squares cost by propagating an inverse correlation matrix
    through the matrix inversion lemma.

    Recursion (common form)
    -----------------------
    y[k] = w[k]^H x_k
    e[k] = d[k] - y[k]

    g[k] = (S[k-1] x_k) / (lambda + x_k^H S[k-1] x_k)
    w[k] = w[k-1] + conj(e[k]) g[k]
    S[k] = (S[k-1] - g[k] x_k^H S[k-1]) / lambda

    Notes
    -----
    - Complex-valued implementation (supports_complex=True).
    - By default the a priori output/error are returned.
    - With `return_internal_states=True`, a posteriori sequences and selected
      internal states are placed in `result.extra`.
    """

    supports_complex: bool = True

    lamb: float    # forgetting factor, 0 < lamb <= 1
    delta: float   # S_d(0) = (1/delta) * I
    S_d: np.ndarray  # running inverse correlation matrix estimate

    def __init__(
        self,
        filter_order: int,
        delta: float,
        lamb: float,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        delta:
            Initialization for S_d(0) = (1/delta) * I. Must be positive.
        lamb:
            Forgetting factor λ, typically 0 < λ <= 1.
        w_init:
            Optional initial coefficients (length M+1). If None, zeros.
        safe_eps:
            Small epsilon used to guard denominators.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.lamb = float(lamb)
        if not (0.0 < self.lamb <= 1.0):
            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.")

        self.delta = float(delta)
        if self.delta <= 0.0:
            raise ValueError(f"delta must be positive. Got delta={self.delta}.")

        self._safe_eps = float(safe_eps)

        taps = int(self.filter_order) + 1
        self.S_d = np.eye(taps, dtype=complex) / self.delta

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run RLS adaptation over the given signals.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes a posteriori sequences and final internal states
            in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs: a priori output y[k] = w^H x_k.
            errors: a priori error e[k] = d[k] - y[k].
            coefficients: coefficient history stored by the base class.
            error_type: "a_priori".

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["outputs_posteriori"]: output recomputed after updating w.
        extra["errors_posteriori"]: d[k] minus the a posteriori output.
        extra["S_d_last"]: final inverse correlation matrix.
        extra["gain_last"]: last gain vector g.
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()
        n_samples = int(d.size)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        want_states = bool(return_internal_states)
        y_post = np.zeros(n_samples, dtype=complex) if want_states else None
        e_post = np.zeros(n_samples, dtype=complex) if want_states else None

        last_gain: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Shift the tapped delay line and insert the newest sample.
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            # A priori estimate and error.
            y_now = complex(np.vdot(self.w, self.regressor))
            err_now = d[k] - y_now
            outputs[k] = y_now
            errors[k] = err_now

            # Gain vector via the matrix inversion lemma.
            s_x = self.S_d @ self.regressor
            denom = self.lamb + complex(np.vdot(self.regressor, s_x))
            if abs(denom) < self._safe_eps:
                # Guard a near-singular normalization.
                denom = denom + (self._safe_eps + 0.0j)
            gain = s_x / denom
            last_gain = gain

            # Coefficient and inverse-correlation updates.
            self.w = self.w + np.conj(err_now) * gain
            self.S_d = (self.S_d - np.outer(gain, np.conj(s_x))) / self.lamb

            if want_states:
                post = complex(np.vdot(self.w, self.regressor))
                y_post[k] = post
                e_post[k] = d[k] - post

            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[RLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if want_states:
            extra = {
                "outputs_posteriori": y_post,
                "errors_posteriori": e_post,
                "S_d_last": self.S_d.copy(),
                "gain_last": None if last_gain is None else last_gain.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Recursive Least Squares (RLS) for complex-valued adaptive FIR filtering.
Implements Algorithm 5.3 (Diniz). RLS minimizes an exponentially-weighted least-squares cost by updating an inverse correlation matrix using the matrix inversion lemma.
Recursion (common form)
y[k] = w[k]^H x_k
e[k] = d[k] - y[k]
g[k] = (S[k-1] x_k) / (lambda + x_k^H S[k-1] x_k)
w[k] = w[k-1] + conj(e[k]) g[k]
S[k] = (S[k-1] - g[k] x_k^H S[k-1]) / lambda
Notes
- Complex-valued implementation (supports_complex=True).
- By default, returns a priori output/error.
- If `return_internal_states=True`, includes a posteriori sequences and selected internal states in `result.extra`.
59 def __init__( 60 self, 61 filter_order: int, 62 delta: float, 63 lamb: float, 64 w_init: Optional[ArrayLike] = None, 65 *, 66 safe_eps: float = 1e-12, 67 ) -> None: 68 """ 69 Parameters 70 ---------- 71 filter_order: 72 FIR order M (number of taps is M+1). 73 delta: 74 Initialization for S_d(0) = (1/delta) * I. Must be positive. 75 lamb: 76 Forgetting factor λ, typically 0 < λ <= 1. 77 w_init: 78 Optional initial coefficients (length M+1). If None, zeros. 79 safe_eps: 80 Small epsilon used to guard denominators. 81 """ 82 super().__init__(filter_order=int(filter_order), w_init=w_init) 83 84 self.lamb = float(lamb) 85 if not (0.0 < self.lamb <= 1.0): 86 raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.") 87 88 self.delta = float(delta) 89 if self.delta <= 0.0: 90 raise ValueError(f"delta must be positive. Got delta={self.delta}.") 91 92 self._safe_eps = float(safe_eps) 93 94 n_taps = int(self.filter_order) + 1 95 self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
Parameters
filter_order: FIR order M (number of taps is M+1). delta: Initialization for S_d(0) = (1/delta) * I. Must be positive. lamb: Forgetting factor λ, typically 0 < λ <= 1. w_init: Optional initial coefficients (length M+1). If None, zeros. safe_eps: Small epsilon used to guard denominators.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run RLS adaptation over the given signals.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, includes a posteriori sequences and final internal states
        in `result.extra`.

    Returns
    -------
    OptimizationResult
        outputs: a priori output y[k] = w^H x_k.
        errors: a priori error e[k] = d[k] - y[k].
        coefficients: coefficient history stored by the base class.
        error_type: "a_priori".

    Extra (when return_internal_states=True)
    ----------------------------------------
    extra["outputs_posteriori"]: output recomputed after updating w.
    extra["errors_posteriori"]: d[k] minus the a posteriori output.
    extra["S_d_last"]: final inverse correlation matrix.
    extra["gain_last"]: last gain vector g.
    """
    tic: float = time()

    x = np.asarray(input_signal, dtype=complex).ravel()
    d = np.asarray(desired_signal, dtype=complex).ravel()
    n_samples = int(d.size)

    outputs = np.zeros(n_samples, dtype=complex)
    errors = np.zeros(n_samples, dtype=complex)

    want_states = bool(return_internal_states)
    y_post = np.zeros(n_samples, dtype=complex) if want_states else None
    e_post = np.zeros(n_samples, dtype=complex) if want_states else None

    last_gain: Optional[np.ndarray] = None

    for k in range(n_samples):
        # Shift the tapped delay line and insert the newest sample.
        self.regressor = np.roll(self.regressor, 1)
        self.regressor[0] = x[k]

        y_now = complex(np.vdot(self.w, self.regressor))
        err_now = d[k] - y_now
        outputs[k] = y_now
        errors[k] = err_now

        # Gain via the matrix inversion lemma.
        s_x = self.S_d @ self.regressor
        denom = self.lamb + complex(np.vdot(self.regressor, s_x))
        if abs(denom) < self._safe_eps:
            denom = denom + (self._safe_eps + 0.0j)
        gain = s_x / denom
        last_gain = gain

        self.w = self.w + np.conj(err_now) * gain
        self.S_d = (self.S_d - np.outer(gain, np.conj(s_x))) / self.lamb

        if want_states:
            post = complex(np.vdot(self.w, self.regressor))
            y_post[k] = post
            e_post[k] = d[k] - post

        self._record_history()

    runtime_s = float(time() - tic)
    if verbose:
        print(f"[RLS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if want_states:
        extra = {
            "outputs_posteriori": y_post,
            "errors_posteriori": e_post,
            "S_d_last": self.S_d.copy(),
            "gain_last": None if last_gain is None else last_gain.copy(),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Run RLS adaptation.
Parameters
input_signal:
Input signal x[k].
desired_signal:
Desired signal d[k].
verbose:
If True, prints runtime.
return_internal_states:
If True, includes a posteriori sequences and final internal states in result.extra.
Returns
OptimizationResult outputs: A priori output y[k] = w^H x_k. errors: A priori error e[k] = d[k] - y[k]. coefficients: Coefficient history stored in the base class. error_type: "a_priori".
Extra (when return_internal_states=True)
extra["outputs_posteriori"]: A posteriori output y_post[k] computed after updating w. extra["errors_posteriori"]: A posteriori error e_post[k] = d[k] - y_post[k]. extra["S_d_last"]: Final inverse correlation matrix. extra["gain_last"]: Last gain vector g.
class RLSAlt(AdaptiveFilter):
    """
    Alternative RLS (RLS-Alt) adaptive FIR filter for complex-valued data.

    Implements Algorithm 5.4 (Diniz). The variant lowers the computational
    burden by working with the auxiliary vector

        psi[k] = S_d[k-1] x_k

    where S_d is the inverse correlation (inverse deterministic
    autocorrelation) matrix and x_k is the tapped-delay-line regressor.

    Notes
    -----
    - Complex-valued implementation (supports_complex=True).
    - Returns the **a priori** output and error by default:
        y[k] = w[k]^H x_k
        e[k] = d[k] - y[k]
      and can optionally provide a posteriori sequences in `extra`.
    - Uses unified base API via `@validate_input`.
    """

    supports_complex: bool = True

    lamb: float    # forgetting factor, 0 < lamb <= 1
    delta: float   # S_d(0) = (1/delta) * I
    S_d: np.ndarray  # running inverse correlation matrix estimate

    def __init__(
        self,
        filter_order: int,
        delta: float,
        lamb: float,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of taps is M+1).
        delta:
            Initialization factor for S_d(0) = (1/delta) * I. Must be positive.
        lamb:
            Forgetting factor λ. Typically 0 < λ <= 1.
        w_init:
            Optional initial coefficients (length M+1). If None, zeros.
        safe_eps:
            Small epsilon used to guard denominators.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.lamb = float(lamb)
        if not (0.0 < self.lamb <= 1.0):
            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.")

        self.delta = float(delta)
        if self.delta <= 0.0:
            raise ValueError(f"delta must be positive. Got delta={self.delta}.")

        self._safe_eps = float(safe_eps)

        taps = int(self.filter_order) + 1
        self.S_d = np.eye(taps, dtype=complex) / self.delta

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run RLS-Alt adaptation over the given signals.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes a posteriori sequences and last internal
            matrices in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs: a priori output y[k] = w^H x_k.
            errors: a priori error e[k] = d[k] - y[k].
            coefficients: coefficient history stored by the base class.
            error_type: "a_priori".

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["outputs_posteriori"]: output using the updated w[k+1].
        extra["errors_posteriori"]: d[k] minus the a posteriori output.
        extra["S_d_last"]: final inverse correlation matrix.
        extra["gain_last"]: Kalman gain-like vector g at last iteration.
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()
        n_samples = int(d.size)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        want_states = bool(return_internal_states)
        y_post = np.zeros(n_samples, dtype=complex) if want_states else None
        e_post = np.zeros(n_samples, dtype=complex) if want_states else None

        last_gain: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Shift the tapped delay line and insert the newest sample.
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            y_now = complex(np.vdot(self.w, self.regressor))
            err_now = d[k] - y_now
            outputs[k] = y_now
            errors[k] = err_now

            # Auxiliary vector psi[k] = S_d[k-1] x_k (Algorithm 5.4).
            psi = self.S_d @ self.regressor

            denom = self.lamb + complex(np.vdot(self.regressor, psi))
            if abs(denom) < self._safe_eps:
                denom = denom + (self._safe_eps + 0.0j)

            gain = psi / denom
            last_gain = gain

            self.w = self.w + np.conj(err_now) * gain
            self.S_d = (self.S_d - np.outer(gain, np.conj(psi))) / self.lamb

            if want_states:
                post = complex(np.vdot(self.w, self.regressor))
                y_post[k] = post
                e_post[k] = d[k] - post

            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[RLSAlt] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if want_states:
            extra = {
                "outputs_posteriori": y_post,
                "errors_posteriori": e_post,
                "S_d_last": self.S_d.copy(),
                "gain_last": None if last_gain is None else last_gain.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Alternative RLS (RLS-Alt) for complex-valued adaptive FIR filtering.
Implements Algorithm 5.4 (Diniz). This variant reduces computational burden by using the auxiliary vector:
psi[k] = S_d[k-1] x_k
where S_d is the inverse correlation (or inverse deterministic autocorrelation) matrix, and x_k is the tapped-delay-line regressor.
Notes
- Complex-valued implementation (supports_complex=True).
- Returns the a priori output and error by default:
y[k] = w[k]^H x_k
e[k] = d[k] - y[k]
and can optionally provide a posteriori sequences in `extra`.
- Uses unified base API via `@validate_input`.
58 def __init__( 59 self, 60 filter_order: int, 61 delta: float, 62 lamb: float, 63 w_init: Optional[ArrayLike] = None, 64 *, 65 safe_eps: float = 1e-12, 66 ) -> None: 67 """ 68 Parameters 69 ---------- 70 filter_order: 71 FIR order M (number of taps is M+1). 72 delta: 73 Initialization factor for S_d(0) = (1/delta) * I. Must be positive. 74 lamb: 75 Forgetting factor λ. Typically 0 < λ <= 1. 76 w_init: 77 Optional initial coefficients (length M+1). If None, zeros. 78 safe_eps: 79 Small epsilon used to guard denominators. 80 """ 81 super().__init__(filter_order=int(filter_order), w_init=w_init) 82 83 self.lamb = float(lamb) 84 if not (0.0 < self.lamb <= 1.0): 85 raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got lamb={self.lamb}.") 86 87 self.delta = float(delta) 88 if self.delta <= 0.0: 89 raise ValueError(f"delta must be positive. Got delta={self.delta}.") 90 91 self._safe_eps = float(safe_eps) 92 93 n_taps = int(self.filter_order) + 1 94 self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
Parameters
filter_order: FIR order M (number of taps is M+1). delta: Initialization factor for S_d(0) = (1/delta) * I. Must be positive. lamb: Forgetting factor λ. Typically 0 < λ <= 1. w_init: Optional initial coefficients (length M+1). If None, zeros. safe_eps: Small epsilon used to guard denominators.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run RLS-Alt adaptation over the given signals.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, includes a posteriori sequences and last internal matrices
        in `result.extra`.

    Returns
    -------
    OptimizationResult
        outputs: a priori output y[k] = w^H x_k.
        errors: a priori error e[k] = d[k] - y[k].
        coefficients: coefficient history stored by the base class.
        error_type: "a_priori".

    Extra (when return_internal_states=True)
    ----------------------------------------
    extra["outputs_posteriori"]: output using the updated w[k+1].
    extra["errors_posteriori"]: d[k] minus the a posteriori output.
    extra["S_d_last"]: final inverse correlation matrix.
    extra["gain_last"]: Kalman gain-like vector g at last iteration.
    """
    tic: float = time()

    x = np.asarray(input_signal, dtype=complex).ravel()
    d = np.asarray(desired_signal, dtype=complex).ravel()
    n_samples = int(d.size)

    outputs = np.zeros(n_samples, dtype=complex)
    errors = np.zeros(n_samples, dtype=complex)

    want_states = bool(return_internal_states)
    y_post = np.zeros(n_samples, dtype=complex) if want_states else None
    e_post = np.zeros(n_samples, dtype=complex) if want_states else None

    last_gain: Optional[np.ndarray] = None

    for k in range(n_samples):
        # Shift the tapped delay line and insert the newest sample.
        self.regressor = np.roll(self.regressor, 1)
        self.regressor[0] = x[k]

        y_now = complex(np.vdot(self.w, self.regressor))
        err_now = d[k] - y_now
        outputs[k] = y_now
        errors[k] = err_now

        # Auxiliary vector psi[k] = S_d[k-1] x_k.
        psi = self.S_d @ self.regressor

        denom = self.lamb + complex(np.vdot(self.regressor, psi))
        if abs(denom) < self._safe_eps:
            denom = denom + (self._safe_eps + 0.0j)

        gain = psi / denom
        last_gain = gain

        self.w = self.w + np.conj(err_now) * gain
        self.S_d = (self.S_d - np.outer(gain, np.conj(psi))) / self.lamb

        if want_states:
            post = complex(np.vdot(self.w, self.regressor))
            y_post[k] = post
            e_post[k] = d[k] - post

        self._record_history()

    runtime_s = float(time() - tic)
    if verbose:
        print(f"[RLSAlt] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if want_states:
        extra = {
            "outputs_posteriori": y_post,
            "errors_posteriori": e_post,
            "S_d_last": self.S_d.copy(),
            "gain_last": None if last_gain is None else last_gain.copy(),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Run RLS-Alt adaptation.
Parameters
input_signal:
Input signal x[k].
desired_signal:
Desired signal d[k].
verbose:
If True, prints runtime.
return_internal_states:
If True, includes a posteriori sequences and last internal matrices in result.extra.
Returns
OptimizationResult outputs: A priori output y[k] = w^H x_k. errors: A priori error e[k] = d[k] - y[k]. coefficients: Coefficient history stored in the base class. error_type: "a_priori".
Extra (when return_internal_states=True)
extra["outputs_posteriori"]: A posteriori output y_post[k] using updated w[k+1]. extra["errors_posteriori"]: A posteriori error e_post[k] = d[k] - y_post[k]. extra["S_d_last"]: Final inverse correlation matrix. extra["gain_last"]: Kalman gain-like vector g at last iteration.
class SMNLMS(AdaptiveFilter):
    """
    Set-Membership Normalized LMS (SM-NLMS) for complex-valued data.

    Coefficients are updated only when |e(k)| > gamma_bar, which yields
    sparse, data-selective updates. (Algorithm 6.1, Diniz)
    """

    supports_complex: bool = True

    gamma_bar: float  # set-membership error-magnitude bound
    gamma: float      # regularization added to ||x||^2
    n_coeffs: int     # number of taps (filter_order + 1)

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma: float,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients is
            filter_order + 1.
        gamma_bar:
            Error magnitude threshold for triggering updates.
        gamma:
            Regularization factor to avoid division by zero in normalization.
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)
        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.n_coeffs = int(self.filter_order + 1)

        # Count of iterations that actually adapted w.
        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the SM-NLMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime and update stats.
        return_internal_states:
            If True, includes additional internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs: a priori output y[k] = w^H x_k.
            errors: a priori error e[k] = d[k] - y[k].
            coefficients: coefficient history stored by the base class.
            error_type: "a_priori".

        Extra (always)
        --------------
        extra["n_updates"]: number of iterations where |e(k)| > gamma_bar.
        extra["update_mask"]: boolean flags marking updating iterations.

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["mu"]: SM step-size factor per iteration (0 when no update).
        extra["den"]: denominator gamma + ||x_k||^2 (0 when no update).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)
        update_mask = np.zeros(n_samples, dtype=bool)

        want_states = bool(return_internal_states)
        mu_track = np.zeros(n_samples, dtype=float) if want_states else None
        den_track = np.zeros(n_samples, dtype=float) if want_states else None

        self.n_updates = 0

        # Ensure the delay line is complex and correctly sized.
        self.regressor = np.asarray(self.regressor, dtype=complex)
        if self.regressor.size != self.n_coeffs:
            self.regressor = np.zeros(self.n_coeffs, dtype=complex)

        for k in range(n_samples):
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            yk = complex(np.dot(self.w.conj(), self.regressor))
            ek = complex(d[k] - yk)
            outputs[k] = yk
            errors[k] = ek

            eabs = float(np.abs(ek))
            if eabs > self.gamma_bar:
                # Error left the constraint set: perform a normalized update.
                self.n_updates += 1
                update_mask[k] = True

                mu = float(1.0 - (self.gamma_bar / eabs))
                norm_sq = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
                den = float(self.gamma + norm_sq)
                if den <= 0.0:
                    den = float(self.gamma + 1e-30)

                self.w = self.w + (mu / den) * (np.conj(ek) * self.regressor)

                if want_states:
                    if mu_track is not None:
                        mu_track[k] = mu
                    if den_track is not None:
                        den_track[k] = den
            else:
                if want_states and mu_track is not None:
                    mu_track[k] = 0.0

            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            pct = (100.0 * self.n_updates / n_samples) if n_samples > 0 else 0.0
            print(f"[SM-NLMS] Updates: {self.n_updates}/{n_samples} ({pct:.1f}%) | Runtime: {runtime_s * 1000:.03f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if want_states:
            extra.update({"mu": mu_track, "den": den_track})

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Implements the Set-membership Normalized LMS algorithm for complex-valued data.
Coefficients are updated only when |e(k)| > gamma_bar. (Algorithm 6.1, Diniz)
37 def __init__( 38 self, 39 filter_order: int, 40 gamma_bar: float, 41 gamma: float, 42 w_init: Optional[Union[np.ndarray, list]] = None, 43 ) -> None: 44 """ 45 Parameters 46 ---------- 47 filter_order: 48 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 49 gamma_bar: 50 Error magnitude threshold for triggering updates. 51 gamma: 52 Regularization factor to avoid division by zero in normalization. 53 w_init: 54 Optional initial coefficient vector. If None, initializes to zeros. 55 """ 56 super().__init__(filter_order=filter_order, w_init=w_init) 57 self.gamma_bar = float(gamma_bar) 58 self.gamma = float(gamma) 59 self.n_coeffs = int(self.filter_order + 1) 60 61 self.n_updates: int = 0
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. gamma_bar: Error magnitude threshold for triggering updates. gamma: Regularization factor to avoid division by zero in normalization. w_init: Optional initial coefficient vector. If None, initializes to zeros.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the SM-NLMS adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime and update stats.
    return_internal_states:
        If True, includes additional internal trajectories in result.extra.

    Returns
    -------
    OptimizationResult
        outputs: a priori output y[k] = w^H x_k.
        errors: a priori error e[k] = d[k] - y[k].
        coefficients: coefficient history stored by the base class.
        error_type: "a_priori".

    Extra (always)
    --------------
    extra["n_updates"]: number of iterations where |e(k)| > gamma_bar.
    extra["update_mask"]: boolean flags marking updating iterations.

    Extra (when return_internal_states=True)
    ----------------------------------------
    extra["mu"]: SM step-size factor per iteration (0 when no update).
    extra["den"]: denominator gamma + ||x_k||^2 (0 when no update).
    """
    tic: float = time()

    x = np.asarray(input_signal, dtype=complex).ravel()
    d = np.asarray(desired_signal, dtype=complex).ravel()
    n_samples = int(x.size)

    outputs = np.zeros(n_samples, dtype=complex)
    errors = np.zeros(n_samples, dtype=complex)
    update_mask = np.zeros(n_samples, dtype=bool)

    want_states = bool(return_internal_states)
    mu_track = np.zeros(n_samples, dtype=float) if want_states else None
    den_track = np.zeros(n_samples, dtype=float) if want_states else None

    self.n_updates = 0

    # Ensure the delay line is complex and correctly sized.
    self.regressor = np.asarray(self.regressor, dtype=complex)
    if self.regressor.size != self.n_coeffs:
        self.regressor = np.zeros(self.n_coeffs, dtype=complex)

    for k in range(n_samples):
        self.regressor = np.roll(self.regressor, 1)
        self.regressor[0] = x[k]

        yk = complex(np.dot(self.w.conj(), self.regressor))
        ek = complex(d[k] - yk)
        outputs[k] = yk
        errors[k] = ek

        eabs = float(np.abs(ek))
        if eabs > self.gamma_bar:
            # Error left the constraint set: perform a normalized update.
            self.n_updates += 1
            update_mask[k] = True

            mu = float(1.0 - (self.gamma_bar / eabs))
            norm_sq = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
            den = float(self.gamma + norm_sq)
            if den <= 0.0:
                den = float(self.gamma + 1e-30)

            self.w = self.w + (mu / den) * (np.conj(ek) * self.regressor)

            if want_states:
                if mu_track is not None:
                    mu_track[k] = mu
                if den_track is not None:
                    den_track[k] = den
        else:
            if want_states and mu_track is not None:
                mu_track[k] = 0.0

        self._record_history()

    runtime_s = float(time() - tic)
    if verbose:
        pct = (100.0 * self.n_updates / n_samples) if n_samples > 0 else 0.0
        print(f"[SM-NLMS] Updates: {self.n_updates}/{n_samples} ({pct:.1f}%) | Runtime: {runtime_s * 1000:.03f} ms")

    extra: Dict[str, Any] = {
        "n_updates": int(self.n_updates),
        "update_mask": update_mask,
    }
    if want_states:
        extra.update({"mu": mu_track, "den": den_track})

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the SM-NLMS adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime and update stats. return_internal_states: If True, includes additional internal trajectories in result.extra.
Returns
OptimizationResult outputs: A-priori output y[k] = w^H x_k. errors: A-priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
Extra (always)
extra["n_updates"]: Number of coefficient updates (iterations where |e(k)| > gamma_bar). extra["update_mask"]: Boolean array marking which iterations performed updates.
Extra (when return_internal_states=True)
extra["mu"]: Trajectory of the SM step-size factor mu[k] (0 when no update). extra["den"]: Denominator trajectory gamma + ||x_k||^2 (0 when no update).
class SMBNLMS(AdaptiveFilter):
    """
    Set-Membership Binormalized LMS (SM-BNLMS) for complex-valued data.

    A particular case of SM-AP with L=1 that reuses the previous regressor,
    improving convergence speed over SM-NLMS with little extra computation.
    (Algorithm 6.5, Diniz)
    """

    supports_complex: bool = True

    gamma_bar: float  # set-membership error-magnitude bound
    gamma: float      # regularization guarding the denominator
    n_coeffs: int     # number of taps (filter_order + 1)

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma: float,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients is
            filter_order + 1.
        gamma_bar:
            Upper bound for the error magnitude (set-membership threshold).
        gamma:
            Regularization factor to avoid division by zero (and stabilize
            the denominator).
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.n_coeffs = int(self.filter_order + 1)

        # One-sample-old regressor reused by the binormalized update.
        self.regressor_prev: np.ndarray = np.zeros(self.n_coeffs, dtype=complex)

        # Count of iterations that actually adapted w.
        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the SM-BNLMS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime and update count.
        return_internal_states:
            If True, includes internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs: a priori output y[k] = w^H x_k.
            errors: a priori error e[k] = d[k] - y[k].
            coefficients: coefficient history stored by the base class.
            error_type: "a_priori".

        Extra (always)
        --------------
        extra["n_updates"]: number of iterations where |e(k)| > gamma_bar.
        extra["update_mask"]: boolean flags marking updating iterations.

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["mu"]: SM step-size factor per iteration (0 when no update).
        extra["den"]: denominator used in lambda1/lambda2 (0 when no update).
        extra["lambda1"]: lambda1 trajectory (0 when no update).
        extra["lambda2"]: lambda2 trajectory (0 when no update).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()
        n_samples = int(x.size)
        taps = int(self.n_coeffs)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)
        update_mask = np.zeros(n_samples, dtype=bool)

        want_states = bool(return_internal_states)
        mu_track = np.zeros(n_samples, dtype=float) if want_states else None
        den_track = np.zeros(n_samples, dtype=float) if want_states else None
        lam1_track = np.zeros(n_samples, dtype=complex) if want_states else None
        lam2_track = np.zeros(n_samples, dtype=complex) if want_states else None

        self.n_updates = 0

        # Ensure both delay lines are complex and correctly sized.
        self.regressor = np.asarray(self.regressor, dtype=complex)
        if self.regressor.size != taps:
            self.regressor = np.zeros(taps, dtype=complex)
        self.regressor_prev = np.asarray(self.regressor_prev, dtype=complex)
        if self.regressor_prev.size != taps:
            self.regressor_prev = np.zeros(taps, dtype=complex)

        for k in range(n_samples):
            # Remember the previous regressor, then shift in the new sample.
            self.regressor_prev = self.regressor.copy()
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            yk = complex(np.dot(self.w.conj(), self.regressor))
            ek = complex(d[k] - yk)
            outputs[k] = yk
            errors[k] = ek

            eabs = float(np.abs(ek))
            if eabs > self.gamma_bar:
                self.n_updates += 1
                update_mask[k] = True

                mu = float(1.0 - (self.gamma_bar / eabs))

                norm_sq = float(np.real(np.dot(self.regressor.conj(), self.regressor)))
                prev_norm_sq = float(np.real(np.dot(self.regressor_prev.conj(), self.regressor_prev)))
                cross_term = complex(np.dot(self.regressor_prev.conj(), self.regressor))

                den = float(self.gamma + (norm_sq * prev_norm_sq) - (np.abs(cross_term) ** 2))
                if den <= 0.0:
                    den = float(self.gamma + 1e-30)

                # Binormalized combination coefficients (Algorithm 6.5).
                lambda1 = complex((mu * ek * prev_norm_sq) / den)
                lambda2 = complex(-(mu * ek * np.conj(cross_term)) / den)

                self.w = self.w + (np.conj(lambda1) * self.regressor) + (np.conj(lambda2) * self.regressor_prev)

                if want_states:
                    if mu_track is not None:
                        mu_track[k] = mu
                    if den_track is not None:
                        den_track[k] = den
                    if lam1_track is not None:
                        lam1_track[k] = lambda1
                    if lam2_track is not None:
                        lam2_track[k] = lambda2
            else:
                if want_states and mu_track is not None:
                    mu_track[k] = 0.0

            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[SM-BNLMS] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.03f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if want_states:
            extra.update(
                {
                    "mu": mu_track,
                    "den": den_track,
                    "lambda1": lam1_track,
                    "lambda2": lam2_track,
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Implements the Set-membership Binormalized LMS (SM-BNLMS) algorithm for complex-valued data.
This algorithm is a specific case of SM-AP with L=1, designed to improve convergence speed over SM-NLMS with low computational overhead by reusing the previous regressor. (Algorithm 6.5, Diniz)
39 def __init__( 40 self, 41 filter_order: int, 42 gamma_bar: float, 43 gamma: float, 44 w_init: Optional[Union[np.ndarray, list]] = None, 45 ) -> None: 46 """ 47 Parameters 48 ---------- 49 filter_order: 50 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 51 gamma_bar: 52 Upper bound for the error magnitude (set-membership threshold). 53 gamma: 54 Regularization factor to avoid division by zero (and stabilize denominator). 55 w_init: 56 Optional initial coefficient vector. If None, initializes to zeros. 57 """ 58 super().__init__(filter_order=filter_order, w_init=w_init) 59 60 self.gamma_bar = float(gamma_bar) 61 self.gamma = float(gamma) 62 self.n_coeffs = int(self.filter_order + 1) 63 64 self.regressor_prev: np.ndarray = np.zeros(self.n_coeffs, dtype=complex) 65 66 self.n_updates: int = 0
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. gamma_bar: Upper bound for the error magnitude (set-membership threshold). gamma: Regularization factor to avoid division by zero (and stabilize denominator). w_init: Optional initial coefficient vector. If None, initializes to zeros.
68 @validate_input 69 def optimize( 70 self, 71 input_signal: np.ndarray, 72 desired_signal: np.ndarray, 73 verbose: bool = False, 74 return_internal_states: bool = False, 75 ) -> OptimizationResult: 76 """ 77 Executes the SM-BNLMS adaptation. 78 79 Parameters 80 ---------- 81 input_signal: 82 Input signal x[k]. 83 desired_signal: 84 Desired signal d[k]. 85 verbose: 86 If True, prints runtime and update count. 87 return_internal_states: 88 If True, includes internal trajectories in result.extra. 89 90 Returns 91 ------- 92 OptimizationResult 93 outputs: 94 A-priori output y[k] = w^H x_k. 95 errors: 96 A-priori error e[k] = d[k] - y[k]. 97 coefficients: 98 History of coefficients stored in the base class. 99 error_type: 100 "a_priori". 101 102 Extra (always) 103 ------------- 104 extra["n_updates"]: 105 Number of coefficient updates (iterations where |e(k)| > gamma_bar). 106 extra["update_mask"]: 107 Boolean array marking which iterations performed updates. 108 109 Extra (when return_internal_states=True) 110 -------------------------------------- 111 extra["mu"]: 112 Trajectory of the SM step-size factor mu[k] (0 when no update). 113 extra["den"]: 114 Denominator trajectory used in lambda1/lambda2 (0 when no update). 115 extra["lambda1"]: 116 Lambda1 trajectory (0 when no update). 117 extra["lambda2"]: 118 Lambda2 trajectory (0 when no update). 
119 """ 120 tic: float = time() 121 122 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 123 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 124 125 n_samples: int = int(x.size) 126 n_coeffs: int = int(self.n_coeffs) 127 128 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 129 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 130 131 update_mask: np.ndarray = np.zeros(n_samples, dtype=bool) 132 133 mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 134 den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 135 lam1_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 136 lam2_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 137 138 self.n_updates = 0 139 140 self.regressor = np.asarray(self.regressor, dtype=complex) 141 if self.regressor.size != n_coeffs: 142 self.regressor = np.zeros(n_coeffs, dtype=complex) 143 144 self.regressor_prev = np.asarray(self.regressor_prev, dtype=complex) 145 if self.regressor_prev.size != n_coeffs: 146 self.regressor_prev = np.zeros(n_coeffs, dtype=complex) 147 148 for k in range(n_samples): 149 self.regressor_prev = self.regressor.copy() 150 151 self.regressor = np.roll(self.regressor, 1) 152 self.regressor[0] = x[k] 153 154 yk: complex = complex(np.dot(self.w.conj(), self.regressor)) 155 ek: complex = complex(d[k] - yk) 156 157 outputs[k] = yk 158 errors[k] = ek 159 160 eabs: float = float(np.abs(ek)) 161 162 if eabs > self.gamma_bar: 163 self.n_updates += 1 164 update_mask[k] = True 165 166 mu: float = float(1.0 - (self.gamma_bar / eabs)) 167 168 norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor))) 169 prev_norm_sq: float = float(np.real(np.dot(self.regressor_prev.conj(), self.regressor_prev))) 170 cross_term: complex = complex(np.dot(self.regressor_prev.conj(), 
self.regressor)) 171 172 den: float = float(self.gamma + (norm_sq * prev_norm_sq) - (np.abs(cross_term) ** 2)) 173 174 if den <= 0.0: 175 den = float(self.gamma + 1e-30) 176 177 lambda1: complex = complex((mu * ek * prev_norm_sq) / den) 178 lambda2: complex = complex(-(mu * ek * np.conj(cross_term)) / den) 179 180 self.w = self.w + (np.conj(lambda1) * self.regressor) + (np.conj(lambda2) * self.regressor_prev) 181 182 if return_internal_states: 183 if mu_track is not None: 184 mu_track[k] = mu 185 if den_track is not None: 186 den_track[k] = den 187 if lam1_track is not None: 188 lam1_track[k] = lambda1 189 if lam2_track is not None: 190 lam2_track[k] = lambda2 191 else: 192 if return_internal_states and mu_track is not None: 193 mu_track[k] = 0.0 194 195 self._record_history() 196 197 runtime_s: float = float(time() - tic) 198 if verbose: 199 print(f"[SM-BNLMS] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.03f} ms") 200 201 extra: Dict[str, Any] = { 202 "n_updates": int(self.n_updates), 203 "update_mask": update_mask, 204 } 205 if return_internal_states: 206 extra.update( 207 { 208 "mu": mu_track, 209 "den": den_track, 210 "lambda1": lam1_track, 211 "lambda2": lam2_track, 212 } 213 ) 214 215 return self._pack_results( 216 outputs=outputs, 217 errors=errors, 218 runtime_s=runtime_s, 219 error_type="a_priori", 220 extra=extra, 221 )
Executes the SM-BNLMS adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime and update count. return_internal_states: If True, includes internal trajectories in result.extra.
Returns
OptimizationResult outputs: A-priori output y[k] = w^H x_k. errors: A-priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
Extra (always)
extra["n_updates"]: Number of coefficient updates (iterations where |e(k)| > gamma_bar). extra["update_mask"]: Boolean array marking which iterations performed updates.
Extra (when return_internal_states=True)
extra["mu"]: Trajectory of the SM step-size factor mu[k] (0 when no update). extra["den"]: Denominator trajectory used in lambda1/lambda2 (0 when no update). extra["lambda1"]: Lambda1 trajectory (0 when no update). extra["lambda2"]: Lambda2 trajectory (0 when no update).
class SMAffineProjection(AdaptiveFilter):
    """
    Implements the Set-membership Affine-Projection (SM-AP) algorithm for complex-valued data.

    This is a supervised algorithm, i.e., it requires both input_signal and desired_signal.
    """
    supports_complex: bool = True

    gamma_bar: float
    gamma_bar_vector: np.ndarray
    gamma: float
    L: int
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma_bar_vector: Union[np.ndarray, list],
        gamma: float,
        L: int,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1.
        gamma_bar:
            Upper bound for the (a-priori) error magnitude used by set-membership criterion.
        gamma_bar_vector:
            Target a-posteriori error vector, size (L+1,). (Algorithm-dependent)
        gamma:
            Regularization factor for the AP correlation matrix.
        L:
            Reuse data factor / constraint length (projection order).
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.L = int(L)

        self.n_coeffs = int(self.filter_order + 1)

        # Validate the target vector length up-front so a bad size fails fast
        # instead of producing a shape error inside the adaptation loop.
        gvec = np.asarray(gamma_bar_vector, dtype=complex).ravel()
        if gvec.size != (self.L + 1):
            raise ValueError(
                f"gamma_bar_vector must have size L+1 = {self.L + 1}, got {gvec.size}"
            )
        # Stored as a column vector to match the (L+1, 1) error vector below.
        self.gamma_bar_vector = gvec.reshape(-1, 1)

        # Column j holds the input regressor delayed by j samples.
        self.regressor_matrix = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)

        # Counts the iterations that actually updated the coefficients.
        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the SM-AP adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime and update count.
        return_internal_states:
            If True, includes additional internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                A-priori output y[k].
            errors:
                A-priori error e[k] = d[k] - y[k] (first component of AP error vector).
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_priori".

        Extra (always)
        -------------
        extra["n_updates"]:
            Number of coefficient updates (iterations where |e(k)| > gamma_bar).
        extra["update_mask"]:
            Boolean array marking which iterations performed updates.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["errors_vector"]:
            Full AP a-priori error vector over time, shape (N, L+1).
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples: int = int(d.size)
        n_coeffs: int = int(self.n_coeffs)
        Lp1: int = int(self.L + 1)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=complex)
        update_mask: np.ndarray = np.zeros(n_samples, dtype=bool)

        errors_vec_track: Optional[np.ndarray] = (
            np.zeros((n_samples, Lp1), dtype=complex) if return_internal_states else None
        )

        self.n_updates = 0
        # Work on a column-vector copy of the coefficients during the loop;
        # self.w is only written back once, after the loop.
        w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)

        # Zero-padding so the first regressors / desired windows are defined.
        prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
        prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d])

        for k in range(n_samples):
            # Shift the data-reuse window one column to the right.
            # NOTE(review): this is an overlapping in-place slice assignment;
            # NumPy resolves the overlap by copying the source — verify the
            # minimum supported NumPy version guarantees this.
            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]

            # Fresh regressor x_k = [x[k], x[k-1], ..., x[k-n_coeffs+1]]^T,
            # read backwards out of the zero-padded input. stop=None is the
            # only way to include index 0 in a negative-step slice (k == 0).
            start_idx = k + n_coeffs - 1
            stop = (k - 1) if (k > 0) else None
            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]

            # AP output vector (conjugate convention): X^H w, shape (L+1, 1).
            output_ap_conj = (self.regressor_matrix.conj().T) @ w_current

            # Desired window [d[k], d[k-1], ..., d[k-L]] (reversed slice again).
            desired_slice = prefixed_desired[k + self.L : stop : -1]
            error_ap_conj = desired_slice.conj().reshape(-1, 1) - output_ap_conj

            # First components correspond to the current instant k.
            yk = output_ap_conj[0, 0]
            ek = error_ap_conj[0, 0]

            outputs[k] = yk
            errors[k] = ek
            if return_internal_states and errors_vec_track is not None:
                errors_vec_track[k, :] = error_ap_conj.ravel()

            # Set-membership test: only update when the a-priori error leaves
            # the constraint set.
            if np.abs(ek) > self.gamma_bar:
                self.n_updates += 1
                update_mask[k] = True

                # Regularized AP correlation matrix and the residual that
                # drives the solution toward the gamma_bar_vector target.
                R = (self.regressor_matrix.conj().T @ self.regressor_matrix) + self.gamma * np.eye(Lp1)
                b = error_ap_conj - self.gamma_bar_vector.conj()

                # Prefer the exact solve; fall back to the pseudo-inverse if
                # R is singular despite the regularization.
                try:
                    step = np.linalg.solve(R, b)
                except np.linalg.LinAlgError:
                    step = np.linalg.pinv(R) @ b

                w_current = w_current + (self.regressor_matrix @ step)

        # Publish the final coefficients and record them in the history.
        self.w = w_current.ravel()
        self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[SM-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if return_internal_states:
            extra["errors_vector"] = errors_vec_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Implements the Set-membership Affine-Projection (SM-AP) algorithm for complex-valued data.
This is a supervised algorithm, i.e., it requires both input_signal and desired_signal.
40 def __init__( 41 self, 42 filter_order: int, 43 gamma_bar: float, 44 gamma_bar_vector: Union[np.ndarray, list], 45 gamma: float, 46 L: int, 47 w_init: Optional[Union[np.ndarray, list]] = None, 48 ) -> None: 49 """ 50 Parameters 51 ---------- 52 filter_order: 53 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 54 gamma_bar: 55 Upper bound for the (a-priori) error magnitude used by set-membership criterion. 56 gamma_bar_vector: 57 Target a-posteriori error vector, size (L+1,). (Algorithm-dependent) 58 gamma: 59 Regularization factor for the AP correlation matrix. 60 L: 61 Reuse data factor / constraint length (projection order). 62 w_init: 63 Optional initial coefficient vector. If None, initializes to zeros. 64 """ 65 super().__init__(filter_order=filter_order, w_init=w_init) 66 67 self.gamma_bar = float(gamma_bar) 68 self.gamma = float(gamma) 69 self.L = int(L) 70 71 self.n_coeffs = int(self.filter_order + 1) 72 73 gvec = np.asarray(gamma_bar_vector, dtype=complex).ravel() 74 if gvec.size != (self.L + 1): 75 raise ValueError( 76 f"gamma_bar_vector must have size L+1 = {self.L + 1}, got {gvec.size}" 77 ) 78 self.gamma_bar_vector = gvec.reshape(-1, 1) 79 80 self.regressor_matrix = np.zeros((self.n_coeffs, self.L + 1), dtype=complex) 81 82 self.n_updates: int = 0
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. gamma_bar: Upper bound for the (a-priori) error magnitude used by set-membership criterion. gamma_bar_vector: Target a-posteriori error vector, size (L+1,). (Algorithm-dependent) gamma: Regularization factor for the AP correlation matrix. L: Reuse data factor / constraint length (projection order). w_init: Optional initial coefficient vector. If None, initializes to zeros.
84 @validate_input 85 def optimize( 86 self, 87 input_signal: np.ndarray, 88 desired_signal: np.ndarray, 89 verbose: bool = False, 90 return_internal_states: bool = False, 91 ) -> OptimizationResult: 92 """ 93 Executes the SM-AP adaptation. 94 95 Parameters 96 ---------- 97 input_signal: 98 Input signal x[k]. 99 desired_signal: 100 Desired signal d[k]. 101 verbose: 102 If True, prints runtime and update count. 103 return_internal_states: 104 If True, includes additional internal trajectories in result.extra. 105 106 Returns 107 ------- 108 OptimizationResult 109 outputs: 110 A-priori output y[k]. 111 errors: 112 A-priori error e[k] = d[k] - y[k] (first component of AP error vector). 113 coefficients: 114 History of coefficients stored in the base class. 115 error_type: 116 "a_priori". 117 118 Extra (always) 119 ------------- 120 extra["n_updates"]: 121 Number of coefficient updates (iterations where |e(k)| > gamma_bar). 122 extra["update_mask"]: 123 Boolean array marking which iterations performed updates. 124 125 Extra (when return_internal_states=True) 126 -------------------------------------- 127 extra["errors_vector"]: 128 Full AP a-priori error vector over time, shape (N, L+1). 
129 """ 130 tic: float = time() 131 132 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 133 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 134 135 n_samples: int = int(d.size) 136 n_coeffs: int = int(self.n_coeffs) 137 Lp1: int = int(self.L + 1) 138 139 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 140 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 141 update_mask: np.ndarray = np.zeros(n_samples, dtype=bool) 142 143 errors_vec_track: Optional[np.ndarray] = ( 144 np.zeros((n_samples, Lp1), dtype=complex) if return_internal_states else None 145 ) 146 147 self.n_updates = 0 148 w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1) 149 150 prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x]) 151 prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d]) 152 153 for k in range(n_samples): 154 self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1] 155 156 start_idx = k + n_coeffs - 1 157 stop = (k - 1) if (k > 0) else None 158 self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1] 159 160 output_ap_conj = (self.regressor_matrix.conj().T) @ w_current 161 162 desired_slice = prefixed_desired[k + self.L : stop : -1] 163 error_ap_conj = desired_slice.conj().reshape(-1, 1) - output_ap_conj 164 165 yk = output_ap_conj[0, 0] 166 ek = error_ap_conj[0, 0] 167 168 outputs[k] = yk 169 errors[k] = ek 170 if return_internal_states and errors_vec_track is not None: 171 errors_vec_track[k, :] = error_ap_conj.ravel() 172 173 if np.abs(ek) > self.gamma_bar: 174 self.n_updates += 1 175 update_mask[k] = True 176 177 R = (self.regressor_matrix.conj().T @ self.regressor_matrix) + self.gamma * np.eye(Lp1) 178 b = error_ap_conj - self.gamma_bar_vector.conj() 179 180 try: 181 step = np.linalg.solve(R, b) 182 except np.linalg.LinAlgError: 183 step = np.linalg.pinv(R) @ b 184 185 w_current = w_current + (self.regressor_matrix @ step) 186 187 self.w 
= w_current.ravel() 188 self._record_history() 189 190 runtime_s: float = float(time() - tic) 191 if verbose: 192 print(f"[SM-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.02f} ms") 193 194 extra: Dict[str, Any] = { 195 "n_updates": int(self.n_updates), 196 "update_mask": update_mask, 197 } 198 if return_internal_states: 199 extra["errors_vector"] = errors_vec_track 200 201 return self._pack_results( 202 outputs=outputs, 203 errors=errors, 204 runtime_s=runtime_s, 205 error_type="a_priori", 206 extra=extra, 207 )
Executes the SM-AP adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime and update count. return_internal_states: If True, includes additional internal trajectories in result.extra.
Returns
OptimizationResult outputs: A-priori output y[k]. errors: A-priori error e[k] = d[k] - y[k] (first component of AP error vector). coefficients: History of coefficients stored in the base class. error_type: "a_priori".
Extra (always)
extra["n_updates"]: Number of coefficient updates (iterations where |e(k)| > gamma_bar). extra["update_mask"]: Boolean array marking which iterations performed updates.
Extra (when return_internal_states=True)
extra["errors_vector"]: Full AP a-priori error vector over time, shape (N, L+1).
class LRLSPosteriori(AdaptiveFilter):
    """
    Lattice Recursive Least Squares (LRLS) using a posteriori errors.

    Implements Algorithm 7.1 (Diniz) in a lattice structure (prediction + ladder).

    Library conventions
    -------------------
    - Complex-valued implementation (`supports_complex=True`).
    - Ladder coefficients are stored in `self.v` (length M+1).
    - `self.w` mirrors `self.v` and history is recorded via `_record_history()`.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: float = 1e-12,
        xi_floor: Optional[float] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Number of lattice sections M. Ladder has M+1 coefficients.
        lambda_factor:
            Forgetting factor λ.
        epsilon:
            Energy initialization / regularization.
        w_init:
            Optional initial ladder coefficient vector (length M+1). If None, zeros.
        denom_floor:
            Floor used to avoid division by (near) zero in normalization terms.
        xi_floor:
            Floor used to keep energies positive (defaults to epsilon).
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)

        self._tiny = float(denom_floor)
        self._xi_floor = float(xi_floor) if xi_floor is not None else float(self.epsilon)

        # Per-section state of the lattice predictor:
        #   delta        - time-averaged cross term between backward and forward errors
        #   xi_f / xi_b  - forward / backward error energies (initialized to epsilon)
        #   error_b_prev - backward errors of the previous iteration
        self.delta = np.zeros(self.n_sections, dtype=complex)
        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)

        # Ladder (joint-process) coefficients live in self.v, not self.w.
        if w_init is not None:
            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
                )
            self.v = v0
        else:
            self.v = np.zeros(self.n_sections + 1, dtype=complex)

        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Mirror the ladder into self.w and reset any history the base class
        # may already have recorded, so the history starts from this state.
        self.w = self.v.copy()
        self.w_history = []
        self._record_history()

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS adaptation (a posteriori version) over (x[k], d[k]).

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                A posteriori error e[k].
            coefficients:
                History of ladder coefficients v (mirrored in self.w_history).
            error_type:
                "a_posteriori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_f"], extra["xi_b"], extra["delta"], extra["delta_v"]:
            Final arrays at the end of adaptation.
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            # Section 0 is fed directly by the new input sample.
            err_f = complex(x_in[k])

            curr_err_b = np.zeros(self.n_sections + 1, dtype=complex)
            curr_err_b[0] = x_in[k]

            # Zeroth-order energies: shared by forward and backward errors.
            energy_x = float(np.real(err_f * np.conj(err_f)))
            self.xi_f[0] = max(self.lam * self.xi_f[0] + energy_x, self._xi_floor)
            self.xi_b[0] = self.xi_f[0]

            # Conversion factor gamma (a priori -> a posteriori), order 0 = 1.
            gamma_m = 1.0

            # ---- Prediction stage: propagate errors through the M sections ----
            for m in range(self.n_sections):
                denom_g = max(gamma_m, self._tiny)

                # Recursive cross-correlation between the delayed backward
                # error and the current forward error (gamma-normalized).
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
                )

                # Reflection coefficients of section m (floored denominators).
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                # Order-update of the forward / backward errors.
                new_err_f = err_f - kappa_f * self.error_b_prev[m]
                curr_err_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f

                # Energy recursions for order m+1, floored to stay positive.
                self.xi_f[m + 1] = max(
                    self.lam * self.xi_f[m + 1]
                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g,
                    self._xi_floor,
                )
                self.xi_b[m + 1] = max(
                    self.lam * self.xi_b[m + 1]
                    + float(np.real(curr_err_b[m + 1] * np.conj(curr_err_b[m + 1]))) / denom_g,
                    self._xi_floor,
                )

                # Order-update of the conversion factor; floored so it never
                # collapses to zero or goes negative numerically.
                denom_xib = self.xi_b[m] + self._tiny
                energy_b_curr = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
                gamma_m_next = gamma_m - (energy_b_curr / denom_xib)

                gamma_m = max(gamma_m_next, self._tiny)
                err_f = new_err_f

            # ---- Ladder (joint-process) stage: estimate d[k] ----
            e_post = complex(d_in[k])
            gamma_ladder = 1.0

            for m in range(self.n_sections + 1):
                denom_gl = max(gamma_ladder, self._tiny)

                # Cross term between the backward error and the running
                # a posteriori error (gamma-normalized).
                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (curr_err_b[m] * np.conj(e_post)) / denom_gl
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)

                # Peel off this section's contribution from the error.
                e_post = e_post - np.conj(self.v[m]) * curr_err_b[m]

                denom_xib_m = self.xi_b[m] + self._tiny
                energy_b_l = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
                gamma_ladder_next = gamma_ladder - (energy_b_l / denom_xib_m)
                gamma_ladder = max(gamma_ladder_next, self._tiny)

            # Output is what the ladder explained of d[k].
            outputs[k] = d_in[k] - e_post
            errors[k] = e_post

            # The current backward errors become the delayed ones.
            self.error_b_prev = curr_err_b.copy()

        # Mirror the ladder into self.w and record the final coefficients.
        self.w = self.v.copy()
        self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPosteriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Lattice Recursive Least Squares (LRLS) using a posteriori errors.
Implements Algorithm 7.1 (Diniz) in a lattice structure (prediction + ladder).
Library conventions
- Complex-valued implementation (`supports_complex=True`).
- Ladder coefficients are stored in `self.v` (length M+1).
- `self.w` mirrors `self.v`, and history is recorded via `_record_history()`.
40 def __init__( 41 self, 42 filter_order: int, 43 lambda_factor: float = 0.99, 44 epsilon: float = 0.1, 45 w_init: Optional[Union[np.ndarray, list]] = None, 46 denom_floor: float = 1e-12, 47 xi_floor: Optional[float] = None, 48 ) -> None: 49 """ 50 Parameters 51 ---------- 52 filter_order: 53 Number of lattice sections M. Ladder has M+1 coefficients. 54 lambda_factor: 55 Forgetting factor λ. 56 epsilon: 57 Energy initialization / regularization. 58 w_init: 59 Optional initial ladder coefficient vector (length M+1). If None, zeros. 60 denom_floor: 61 Floor used to avoid division by (near) zero in normalization terms. 62 xi_floor: 63 Floor used to keep energies positive (defaults to epsilon). 64 """ 65 super().__init__(filter_order=filter_order, w_init=w_init) 66 67 self.lam = float(lambda_factor) 68 self.epsilon = float(epsilon) 69 self.n_sections = int(filter_order) 70 71 self._tiny = float(denom_floor) 72 self._xi_floor = float(xi_floor) if xi_floor is not None else float(self.epsilon) 73 74 self.delta = np.zeros(self.n_sections, dtype=complex) 75 self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 76 self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 77 self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex) 78 79 if w_init is not None: 80 v0 = np.asarray(w_init, dtype=complex).reshape(-1) 81 if v0.size != self.n_sections + 1: 82 raise ValueError( 83 f"w_init must have length {self.n_sections + 1}, got {v0.size}" 84 ) 85 self.v = v0 86 else: 87 self.v = np.zeros(self.n_sections + 1, dtype=complex) 88 89 self.delta_v = np.zeros(self.n_sections + 1, dtype=complex) 90 91 self.w = self.v.copy() 92 self.w_history = [] 93 self._record_history()
Parameters
filter_order: Number of lattice sections M. Ladder has M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Energy initialization / regularization. w_init: Optional initial ladder coefficient vector (length M+1). If None, zeros. denom_floor: Floor used to avoid division by (near) zero in normalization terms. xi_floor: Floor used to keep energies positive (defaults to epsilon).
95 @validate_input 96 def optimize( 97 self, 98 input_signal: np.ndarray, 99 desired_signal: np.ndarray, 100 verbose: bool = False, 101 return_internal_states: bool = False, 102 ) -> OptimizationResult: 103 """ 104 Executes LRLS adaptation (a posteriori version) over (x[k], d[k]). 105 106 Returns 107 ------- 108 OptimizationResult 109 outputs: 110 Filter output y[k]. 111 errors: 112 A posteriori error e[k]. 113 coefficients: 114 History of ladder coefficients v (mirrored in self.w_history). 115 error_type: 116 "a_posteriori". 117 118 Extra (when return_internal_states=True) 119 -------------------------------------- 120 extra["xi_f"], extra["xi_b"], extra["delta"], extra["delta_v"]: 121 Final arrays at the end of adaptation. 122 """ 123 t0 = perf_counter() 124 125 x_in = np.asarray(input_signal, dtype=complex).ravel() 126 d_in = np.asarray(desired_signal, dtype=complex).ravel() 127 128 n_samples = int(d_in.size) 129 outputs = np.zeros(n_samples, dtype=complex) 130 errors = np.zeros(n_samples, dtype=complex) 131 132 for k in range(n_samples): 133 err_f = complex(x_in[k]) 134 135 curr_err_b = np.zeros(self.n_sections + 1, dtype=complex) 136 curr_err_b[0] = x_in[k] 137 138 energy_x = float(np.real(err_f * np.conj(err_f))) 139 self.xi_f[0] = max(self.lam * self.xi_f[0] + energy_x, self._xi_floor) 140 self.xi_b[0] = self.xi_f[0] 141 142 gamma_m = 1.0 143 144 for m in range(self.n_sections): 145 denom_g = max(gamma_m, self._tiny) 146 147 self.delta[m] = ( 148 self.lam * self.delta[m] 149 + (self.error_b_prev[m] * np.conj(err_f)) / denom_g 150 ) 151 152 kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny) 153 kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny) 154 155 new_err_f = err_f - kappa_f * self.error_b_prev[m] 156 curr_err_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f 157 158 self.xi_f[m + 1] = max( 159 self.lam * self.xi_f[m + 1] 160 + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g, 161 self._xi_floor, 162 ) 163 self.xi_b[m + 1] = max( 
164 self.lam * self.xi_b[m + 1] 165 + float(np.real(curr_err_b[m + 1] * np.conj(curr_err_b[m + 1]))) / denom_g, 166 self._xi_floor, 167 ) 168 169 denom_xib = self.xi_b[m] + self._tiny 170 energy_b_curr = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m]))) 171 gamma_m_next = gamma_m - (energy_b_curr / denom_xib) 172 173 gamma_m = max(gamma_m_next, self._tiny) 174 err_f = new_err_f 175 176 e_post = complex(d_in[k]) 177 gamma_ladder = 1.0 178 179 for m in range(self.n_sections + 1): 180 denom_gl = max(gamma_ladder, self._tiny) 181 182 self.delta_v[m] = ( 183 self.lam * self.delta_v[m] 184 + (curr_err_b[m] * np.conj(e_post)) / denom_gl 185 ) 186 187 self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny) 188 189 e_post = e_post - np.conj(self.v[m]) * curr_err_b[m] 190 191 denom_xib_m = self.xi_b[m] + self._tiny 192 energy_b_l = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m]))) 193 gamma_ladder_next = gamma_ladder - (energy_b_l / denom_xib_m) 194 gamma_ladder = max(gamma_ladder_next, self._tiny) 195 196 outputs[k] = d_in[k] - e_post 197 errors[k] = e_post 198 199 self.error_b_prev = curr_err_b.copy() 200 201 self.w = self.v.copy() 202 self._record_history() 203 204 runtime_s = float(perf_counter() - t0) 205 if verbose: 206 print(f"[LRLSPosteriori] Completed in {runtime_s * 1000:.02f} ms") 207 208 extra: Optional[Dict[str, Any]] = None 209 if return_internal_states: 210 extra = { 211 "xi_f": self.xi_f.copy(), 212 "xi_b": self.xi_b.copy(), 213 "delta": self.delta.copy(), 214 "delta_v": self.delta_v.copy(), 215 } 216 217 return self._pack_results( 218 outputs=outputs, 219 errors=errors, 220 runtime_s=runtime_s, 221 error_type="a_posteriori", 222 extra=extra, 223 )
Executes LRLS adaptation (a posteriori version) over (x[k], d[k]).
Returns
OptimizationResult outputs: Filter output y[k]. errors: A posteriori error e[k]. coefficients: History of ladder coefficients v (mirrored in self.w_history). error_type: "a_posteriori".
Extra (when return_internal_states=True)
extra["xi_f"], extra["xi_b"], extra["delta"], extra["delta_v"]: Final arrays at the end of adaptation.
class LRLSErrorFeedback(AdaptiveFilter):
    """
    Lattice Recursive Least Squares with Error Feedback (LRLS-EF).

    Implements Algorithm 7.5 from:
    P. S. R. Diniz, "Adaptive Filtering: Algorithms and Practical Implementation".

    Structure overview
    ------------------
    The LRLS-EF algorithm combines:
    1) A lattice prediction stage (forward/backward errors and reflection updates).
    2) An error-feedback ladder stage (joint process / ladder weights).

    Library conventions
    -------------------
    - Complex-valued implementation (`supports_complex=True`).
    - The ladder coefficient vector is `self.v` (length M+1).
    - For compatibility with the base class:
      * `self.w` mirrors `self.v` at each iteration.
      * coefficient history is stored via `self._record_history()`.
    """

    supports_complex: bool = True

    lam: float
    epsilon: float
    n_sections: int
    safe_eps: float

    delta: np.ndarray
    xi_f: np.ndarray
    xi_b: np.ndarray
    error_b_prev: np.ndarray

    v: np.ndarray
    delta_v: np.ndarray

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Lattice order M (number of sections). Ladder has M+1 coefficients.
        lambda_factor:
            Forgetting factor λ.
        epsilon:
            Regularization/initialization constant for energies.
        w_init:
            Optional initial ladder coefficients (length M+1). If None, zeros.
        safe_eps:
            Small positive floor used to avoid division by (near) zero.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        self.safe_eps = float(safe_eps)

        # delta: cross-correlation per lattice section (orders 0..M).
        self.delta = np.zeros(self.n_sections + 1, dtype=complex)

        # Forward/backward energies for orders 0..M+1, initialized at epsilon
        # so the first-iteration divisions are well-conditioned.
        self.xi_f = np.ones(self.n_sections + 2, dtype=float) * self.epsilon
        self.xi_b = np.ones(self.n_sections + 2, dtype=float) * self.epsilon

        # Backward errors of the previous time instant.
        self.error_b_prev = np.zeros(self.n_sections + 2, dtype=complex)

        if w_init is not None:
            v0 = np.asarray(w_init, dtype=complex).ravel()
            if v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
                )
            self.v = v0
        else:
            self.v = np.zeros(self.n_sections + 1, dtype=complex)

        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Mirror ladder coefficients into the base-class API.
        self.w = self.v.copy()
        self.w_history = []
        self._record_history()

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS-EF adaptation over (x[k], d[k]).

        Parameters
        ----------
        input_signal:
            Input sequence x[k].
        desired_signal:
            Desired sequence d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes selected internal states in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of ladder coefficients v (mirrored in `self.w_history`).
            error_type:
                "output_error".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_f"], extra["xi_b"]:
            Final forward/backward energies (length M+2).
        extra["delta"]:
            Final delta vector (length M+1).
        extra["delta_v"]:
            Final delta_v vector (length M+1).
        """
        # perf_counter() is monotonic and high-resolution; time() is wall-clock
        # and can jump. This also matches the timing used by the other LRLS
        # variants in this module.
        tic: float = perf_counter()

        # Force complex arithmetic (supports_complex=True) even for real inputs.
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        eps = self.safe_eps

        for k in range(n_samples):
            # Order-0 forward error is the input sample itself.
            err_f = complex(x_in[k])

            # Backward errors at the current instant, orders 0..M+1.
            curr_b = np.zeros(self.n_sections + 2, dtype=complex)
            curr_b[0] = x_in[k]

            # Order-0 energies share the same |x[k]|^2 recursion.
            energy_x = float(np.real(x_in[k] * np.conj(x_in[k])))
            self.xi_f[0] = self.lam * self.xi_f[0] + energy_x
            self.xi_b[0] = self.xi_f[0]

            # g: conversion factor between a priori and a posteriori errors.
            g = 1.0

            # -------------------------
            # Lattice (prediction) stage
            # -------------------------
            for m in range(self.n_sections + 1):
                denom_g = max(g, eps)

                # Cross-correlation between delayed backward and forward errors.
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
                )

                # Reflection coefficients from delta and the order-m energies.
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + eps)
                kappa_b = self.delta[m] / (self.xi_f[m] + eps)

                # Order-update of forward and backward errors.
                new_err_f = err_f - kappa_f * self.error_b_prev[m]
                curr_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f

                # Energy updates for the next order.
                self.xi_f[m + 1] = (
                    self.lam * self.xi_f[m + 1]
                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g
                )
                self.xi_b[m + 1] = (
                    self.lam * self.xi_b[m + 1]
                    + float(np.real(curr_b[m + 1] * np.conj(curr_b[m + 1]))) / denom_g
                )

                # Conversion-factor order update, clamped away from zero.
                energy_b_curr = float(np.real(curr_b[m] * np.conj(curr_b[m])))
                g = g - (energy_b_curr / (self.xi_b[m] + eps))
                g = max(g, eps)

                err_f = new_err_f

            # Ladder output: np.vdot conjugates its first argument, so
            # y[k] = v^H b over orders 0..M.
            y_k = complex(np.vdot(self.v, curr_b[: self.n_sections + 1]))
            outputs[k] = y_k
            e_k = complex(d_in[k] - y_k)
            errors[k] = e_k

            # -------------------------
            # Ladder (error-feedback) stage
            # -------------------------
            g_ladder = 1.0
            for m in range(self.n_sections + 1):
                denom_gl = max(g_ladder, eps)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (curr_b[m] * np.conj(d_in[k])) / denom_gl
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + eps)

                energy_b = float(np.real(curr_b[m] * np.conj(curr_b[m])))
                g_ladder = g_ladder - (energy_b / (self.xi_b[m] + eps))
                g_ladder = max(g_ladder, eps)

            # curr_b is freshly allocated every iteration, so handing over the
            # reference (no copy) is safe.
            self.error_b_prev = curr_b

            # Mirror ladder coefficients into the base-class API each iteration.
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - tic)
        if verbose:
            print(f"[LRLSErrorFeedback] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Lattice Recursive Least Squares with Error Feedback (LRLS-EF).
Implements Algorithm 7.5 from: P. S. R. Diniz, "Adaptive Filtering: Algorithms and Practical Implementation".
Structure overview
The LRLS-EF algorithm combines: 1) A lattice prediction stage (forward/backward errors and reflection updates). 2) An error-feedback ladder stage (joint process / ladder weights).
Library conventions
- Complex-valued implementation (`supports_complex=True`).
- The ladder coefficient vector is `self.v` (length M+1).
- For compatibility with the base class:
  - `self.w` mirrors `self.v` at each iteration.
  - Coefficient history is stored via `self._record_history()`.
65 def __init__( 66 self, 67 filter_order: int, 68 lambda_factor: float = 0.99, 69 epsilon: float = 0.1, 70 w_init: Optional[Union[np.ndarray, list]] = None, 71 safe_eps: float = 1e-12, 72 ) -> None: 73 """ 74 Parameters 75 ---------- 76 filter_order: 77 Lattice order M (number of sections). Ladder has M+1 coefficients. 78 lambda_factor: 79 Forgetting factor λ. 80 epsilon: 81 Regularization/initialization constant for energies. 82 w_init: 83 Optional initial ladder coefficients (length M+1). If None, zeros. 84 safe_eps: 85 Small positive floor used to avoid division by (near) zero. 86 """ 87 super().__init__(filter_order=filter_order, w_init=w_init) 88 89 self.lam = float(lambda_factor) 90 self.epsilon = float(epsilon) 91 self.n_sections = int(filter_order) 92 self.safe_eps = float(safe_eps) 93 94 self.delta = np.zeros(self.n_sections + 1, dtype=complex) 95 96 self.xi_f = np.ones(self.n_sections + 2, dtype=float) * self.epsilon 97 self.xi_b = np.ones(self.n_sections + 2, dtype=float) * self.epsilon 98 99 self.error_b_prev = np.zeros(self.n_sections + 2, dtype=complex) 100 101 if w_init is not None: 102 v0 = np.asarray(w_init, dtype=complex).ravel() 103 if v0.size != self.n_sections + 1: 104 raise ValueError( 105 f"w_init must have length {self.n_sections + 1}, got {v0.size}" 106 ) 107 self.v = v0 108 else: 109 self.v = np.zeros(self.n_sections + 1, dtype=complex) 110 111 self.delta_v = np.zeros(self.n_sections + 1, dtype=complex) 112 113 self.w = self.v.copy() 114 self.w_history = [] 115 self._record_history()
Parameters
filter_order: Lattice order M (number of sections). Ladder has M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Regularization/initialization constant for energies. w_init: Optional initial ladder coefficients (length M+1). If None, zeros. safe_eps: Small positive floor used to avoid division by (near) zero.
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS-EF adaptation over (x[k], d[k]).

        Parameters
        ----------
        input_signal:
            Input sequence x[k].
        desired_signal:
            Desired sequence d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes selected internal states in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of ladder coefficients v (mirrored in `self.w_history`).
            error_type:
                "output_error".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_f"], extra["xi_b"]:
            Final forward/backward energies (length M+2).
        extra["delta"]:
            Final delta vector (length M+1).
        extra["delta_v"]:
            Final delta_v vector (length M+1).
        """
        # NOTE(review): wall-clock time() is non-monotonic/low-resolution; the
        # sibling LRLS classes use perf_counter() — consider aligning.
        tic: float = time()

        # Force complex arithmetic (supports_complex=True) even for real inputs.
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        eps = self.safe_eps

        for k in range(n_samples):
            # Order-0 forward error is the input sample itself.
            err_f = complex(x_in[k])

            # Backward errors at the current instant, orders 0..M+1.
            curr_b = np.zeros(self.n_sections + 2, dtype=complex)
            curr_b[0] = x_in[k]

            # Order-0 energies share the same |x[k]|^2 recursion.
            energy_x = float(np.real(x_in[k] * np.conj(x_in[k])))
            self.xi_f[0] = self.lam * self.xi_f[0] + energy_x
            self.xi_b[0] = self.xi_f[0]

            # g: conversion factor between a priori and a posteriori errors.
            g = 1.0

            # -------------------------
            # Lattice (prediction) stage
            # -------------------------
            for m in range(self.n_sections + 1):
                denom_g = max(g, eps)

                # Cross-correlation between delayed backward and forward errors.
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
                )

                # Reflection coefficients from delta and the order-m energies.
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + eps)
                kappa_b = self.delta[m] / (self.xi_f[m] + eps)

                # Order-update of forward and backward errors.
                new_err_f = err_f - kappa_f * self.error_b_prev[m]
                curr_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f

                # Energy updates for the next order.
                self.xi_f[m + 1] = (
                    self.lam * self.xi_f[m + 1]
                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g
                )
                self.xi_b[m + 1] = (
                    self.lam * self.xi_b[m + 1]
                    + float(np.real(curr_b[m + 1] * np.conj(curr_b[m + 1]))) / denom_g
                )

                # Conversion-factor order update, clamped away from zero.
                energy_b_curr = float(np.real(curr_b[m] * np.conj(curr_b[m])))
                g = g - (energy_b_curr / (self.xi_b[m] + eps))
                g = max(g, eps)

                err_f = new_err_f

            # Ladder output: np.vdot conjugates its first argument, so
            # y[k] = v^H b over orders 0..M.
            y_k = complex(np.vdot(self.v, curr_b[: self.n_sections + 1]))
            outputs[k] = y_k
            e_k = complex(d_in[k] - y_k)
            errors[k] = e_k

            # -------------------------
            # Ladder (error-feedback) stage
            # -------------------------
            g_ladder = 1.0
            for m in range(self.n_sections + 1):
                denom_gl = max(g_ladder, eps)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (curr_b[m] * np.conj(d_in[k])) / denom_gl
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + eps)

                energy_b = float(np.real(curr_b[m] * np.conj(curr_b[m])))
                g_ladder = g_ladder - (energy_b / (self.xi_b[m] + eps))
                g_ladder = max(g_ladder, eps)

            # curr_b is freshly allocated every iteration, so handing over the
            # reference (no copy) is safe.
            self.error_b_prev = curr_b

            # Mirror ladder coefficients into the base-class API each iteration.
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[LRLSErrorFeedback] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Executes LRLS-EF adaptation over (x[k], d[k]).
Parameters
input_signal:
Input sequence x[k].
desired_signal:
Desired sequence d[k].
verbose:
If True, prints runtime.
return_internal_states:
If True, includes selected internal states in result.extra.
Returns
OptimizationResult
outputs:
Estimated output y[k].
errors:
Output error e[k] = d[k] - y[k].
coefficients:
History of ladder coefficients v (mirrored in self.w_history).
error_type:
"output_error".
Extra (when return_internal_states=True)
extra["xi_f"], extra["xi_b"]: Final forward/backward energies (length M+2). extra["delta"]: Final delta vector (length M+1). extra["delta_v"]: Final delta_v vector (length M+1).
class LRLSPriori(AdaptiveFilter):
    """
    Lattice Recursive Least Squares (LRLS) using a priori errors.

    Implements Algorithm 7.4 (Diniz) in a lattice (prediction + ladder) structure.

    Library conventions
    -------------------
    - Complex arithmetic (`supports_complex=True`).
    - Ladder coefficients are stored in `self.v` (length M+1).
    - For consistency with the library base class:
      * `self.w` mirrors `self.v`
      * `self._record_history()` is called each iteration
      * coefficients history is available as `result.coefficients`
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Number of lattice sections M. Ladder has M+1 coefficients.
        lambda_factor:
            Forgetting factor λ.
        epsilon:
            Energy initialization / regularization.
        w_init:
            Optional initial ladder coefficient vector (length M+1). If None, zeros.
        denom_floor:
            Floor used to avoid division by (near) zero in normalization terms.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        self._tiny = float(denom_floor)

        # delta: one cross-correlation per section; energies and the delayed
        # backward errors carry orders 0..M (seeded with epsilon).
        self.delta = np.zeros(self.n_sections, dtype=complex)
        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)

        if w_init is not None:
            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
                )
            self.v = v0
        else:
            self.v = np.zeros(self.n_sections + 1, dtype=complex)

        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Mirror to base API
        self.w = self.v.copy()
        self.w_history = []
        self._record_history()

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS adaptation (a priori version) over (x[k], d[k]).

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                A priori error e[k].
            coefficients:
                History of ladder coefficients v (mirrored in `self.w_history`).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_f"], extra["xi_b"], extra["delta"], extra["delta_v"]:
            Final arrays at the end of adaptation.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            # Order-0 a priori forward error is the input sample itself.
            alpha_f = complex(x_in[k])

            alpha_b = np.zeros(self.n_sections + 1, dtype=complex)
            alpha_b[0] = x_in[k]

            # gamma: conversion factor; its per-order values are stored because
            # the ladder stage below needs gamma at each order, not just the last.
            gamma = 1.0
            gamma_orders = np.ones(self.n_sections + 1, dtype=float)

            # -------------------------
            # Lattice stage (a priori)
            # -------------------------
            for m in range(self.n_sections):
                gamma_orders[m] = gamma
                denom_g = max(gamma, self._tiny)

                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(alpha_f)) / denom_g
                )

                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                alpha_f_next = alpha_f - kappa_f * self.error_b_prev[m]
                alpha_b[m + 1] = self.error_b_prev[m] - kappa_b * alpha_f

                # Energy updates (kept as in your code, with safe denominators)
                # NOTE(review): these write xi_f[m]/xi_b[m] (current order),
                # whereas the other LRLS variants update index m+1 here —
                # confirm against Algorithm 7.4 before changing.
                self.xi_f[m] = (
                    self.lam * self.xi_f[m]
                    + float(np.real(alpha_f * np.conj(alpha_f))) / denom_g
                )
                self.xi_b[m] = (
                    self.lam * self.xi_b[m]
                    + float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_g
                )

                denom_xib = self.xi_b[m] + self._tiny
                gamma_next = gamma - (
                    float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_xib
                )
                gamma = max(gamma_next, self._tiny)
                alpha_f = alpha_f_next

            # Final-order gamma and energy updates (order M).
            gamma_orders[self.n_sections] = gamma
            self.xi_f[self.n_sections] = (
                self.lam * self.xi_f[self.n_sections]
                + float(np.real(alpha_f * np.conj(alpha_f))) / max(gamma, self._tiny)
            )
            self.xi_b[self.n_sections] = (
                self.lam * self.xi_b[self.n_sections]
                + float(np.real(alpha_b[self.n_sections] * np.conj(alpha_b[self.n_sections])))
                / max(gamma, self._tiny)
            )

            # -------------------------
            # Ladder stage (a priori)
            # -------------------------
            alpha_e = complex(d_in[k])

            for m in range(self.n_sections + 1):
                denom_go = max(gamma_orders[m], self._tiny)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (alpha_b[m] * np.conj(alpha_e)) / denom_go
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
                alpha_e = alpha_e - np.conj(self.v[m]) * alpha_b[m]

            # Convert the a priori error via gamma; y[k] follows from d - e.
            e_k = alpha_e * gamma
            errors[k] = e_k
            outputs[k] = d_in[k] - e_k

            self.error_b_prev = alpha_b.copy()

            # Mirror ladder coeffs into base API + record history
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Lattice Recursive Least Squares (LRLS) using a priori errors.
Implements Algorithm 7.4 (Diniz) in a lattice (prediction + ladder) structure.
Library conventions
- Complex arithmetic (`supports_complex=True`).
- Ladder coefficients are stored in `self.v` (length M+1).
- For consistency with the library base class:
  - `self.w` mirrors `self.v`.
  - `self._record_history()` is called each iteration.
  - Coefficients history is available as `result.coefficients`.
43 def __init__( 44 self, 45 filter_order: int, 46 lambda_factor: float = 0.99, 47 epsilon: float = 0.1, 48 w_init: Optional[Union[np.ndarray, list]] = None, 49 denom_floor: float = 1e-12, 50 ) -> None: 51 """ 52 Parameters 53 ---------- 54 filter_order: 55 Number of lattice sections M. Ladder has M+1 coefficients. 56 lambda_factor: 57 Forgetting factor λ. 58 epsilon: 59 Energy initialization / regularization. 60 w_init: 61 Optional initial ladder coefficient vector (length M+1). If None, zeros. 62 denom_floor: 63 Floor used to avoid division by (near) zero in normalization terms. 64 """ 65 super().__init__(filter_order=filter_order, w_init=w_init) 66 67 self.lam = float(lambda_factor) 68 self.epsilon = float(epsilon) 69 self.n_sections = int(filter_order) 70 self._tiny = float(denom_floor) 71 72 self.delta = np.zeros(self.n_sections, dtype=complex) 73 self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 74 self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 75 self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex) 76 77 if w_init is not None: 78 v0 = np.asarray(w_init, dtype=complex).reshape(-1) 79 if v0.size != self.n_sections + 1: 80 raise ValueError( 81 f"w_init must have length {self.n_sections + 1}, got {v0.size}" 82 ) 83 self.v = v0 84 else: 85 self.v = np.zeros(self.n_sections + 1, dtype=complex) 86 87 self.delta_v = np.zeros(self.n_sections + 1, dtype=complex) 88 89 # Mirror to base API 90 self.w = self.v.copy() 91 self.w_history = [] 92 self._record_history()
Parameters
filter_order: Number of lattice sections M. Ladder has M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Energy initialization / regularization. w_init: Optional initial ladder coefficient vector (length M+1). If None, zeros. denom_floor: Floor used to avoid division by (near) zero in normalization terms.
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS adaptation (a priori version) over (x[k], d[k]).

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                A priori error e[k].
            coefficients:
                History of ladder coefficients v (mirrored in `self.w_history`).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_f"], extra["xi_b"], extra["delta"], extra["delta_v"]:
            Final arrays at the end of adaptation.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            # Order-0 a priori forward error is the input sample itself.
            alpha_f = complex(x_in[k])

            alpha_b = np.zeros(self.n_sections + 1, dtype=complex)
            alpha_b[0] = x_in[k]

            # gamma per order is cached because the ladder stage below needs
            # the conversion factor at each order, not only the final one.
            gamma = 1.0
            gamma_orders = np.ones(self.n_sections + 1, dtype=float)

            # -------------------------
            # Lattice stage (a priori)
            # -------------------------
            for m in range(self.n_sections):
                gamma_orders[m] = gamma
                denom_g = max(gamma, self._tiny)

                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(alpha_f)) / denom_g
                )

                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                alpha_f_next = alpha_f - kappa_f * self.error_b_prev[m]
                alpha_b[m + 1] = self.error_b_prev[m] - kappa_b * alpha_f

                # Energy updates (kept as in your code, with safe denominators)
                # NOTE(review): indices m (not m+1) — differs from the other
                # LRLS variants in this file; confirm against Algorithm 7.4.
                self.xi_f[m] = (
                    self.lam * self.xi_f[m]
                    + float(np.real(alpha_f * np.conj(alpha_f))) / denom_g
                )
                self.xi_b[m] = (
                    self.lam * self.xi_b[m]
                    + float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_g
                )

                denom_xib = self.xi_b[m] + self._tiny
                gamma_next = gamma - (
                    float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_xib
                )
                gamma = max(gamma_next, self._tiny)
                alpha_f = alpha_f_next

            # Final-order (M) gamma and energy updates.
            gamma_orders[self.n_sections] = gamma
            self.xi_f[self.n_sections] = (
                self.lam * self.xi_f[self.n_sections]
                + float(np.real(alpha_f * np.conj(alpha_f))) / max(gamma, self._tiny)
            )
            self.xi_b[self.n_sections] = (
                self.lam * self.xi_b[self.n_sections]
                + float(np.real(alpha_b[self.n_sections] * np.conj(alpha_b[self.n_sections])))
                / max(gamma, self._tiny)
            )

            # -------------------------
            # Ladder stage (a priori)
            # -------------------------
            alpha_e = complex(d_in[k])

            for m in range(self.n_sections + 1):
                denom_go = max(gamma_orders[m], self._tiny)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (alpha_b[m] * np.conj(alpha_e)) / denom_go
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
                alpha_e = alpha_e - np.conj(self.v[m]) * alpha_b[m]

            # Convert a priori error with gamma; y[k] = d[k] - e[k].
            e_k = alpha_e * gamma
            errors[k] = e_k
            outputs[k] = d_in[k] - e_k

            self.error_b_prev = alpha_b.copy()

            # Mirror ladder coeffs into base API + record history
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Executes LRLS adaptation (a priori version) over (x[k], d[k]).
Returns
OptimizationResult
outputs:
Filter output y[k].
errors:
A priori error e[k].
coefficients:
History of ladder coefficients v (mirrored in self.w_history).
error_type:
"a_priori".
Extra (when return_internal_states=True)
extra["xi_f"], extra["xi_b"], extra["delta"], extra["delta_v"]: Final arrays at the end of adaptation.
class NormalizedLRLS(AdaptiveFilter):
    """
    Normalized Lattice RLS (NLRLS) algorithm based on a posteriori error.

    Implements Algorithm 7.6 (Diniz). The goal of the normalized lattice recursion
    is improved numerical robustness: internal normalized variables (errors and
    reflection-like coefficients) are designed to be magnitude-bounded by 1.

    Library conventions
    -------------------
    - Complex-valued implementation (supports_complex=True).
    - For API consistency, we expose rho_v (length M+1) as the "coefficient vector":
      * self.w mirrors self.rho_v
      * self.w_history stores rho_v trajectories
      * optimize returns OptimizationResult with coefficients stacked from w_history
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 1e-6,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Number of lattice sections M. The estimation stage uses M+1 coefficients.
        lambda_factor:
            Forgetting factor λ.
        epsilon:
            Regularization used in normalizations and clipping.
        w_init:
            Optional initialization for rho_v (length M+1). If None, zeros.
        denom_floor:
            Extra floor for denominators / sqrt protections.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        # NOTE(review): _tiny is stored but not referenced by optimize below,
        # which uses self.epsilon for all denominators — confirm intent.
        self._tiny = float(denom_floor)

        # Normalized reflection coefficients, one per section.
        self.rho = np.zeros(self.n_sections, dtype=complex)

        if w_init is not None:
            rho_v0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if rho_v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {rho_v0.size}"
                )
            self.rho_v = rho_v0
        else:
            self.rho_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Normalized backward errors from the previous instant; xi_half is the
        # square root of the input energy estimate.
        self.bar_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
        self.xi_half = float(np.sqrt(self.epsilon))

        self.w = self.rho_v.copy()
        self.w_history = []
        self._record_history()

    @staticmethod
    def _safe_sqrt(value: float) -> float:
        """
        Computes sqrt(max(value, 0.0)) to avoid negative arguments due to rounding.
        """
        return float(np.sqrt(max(0.0, float(value))))

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Runs the Normalized LRLS adaptation.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                A posteriori error e[k].
            coefficients:
                History of rho_v (stacked from w_history).
            error_type:
                "a_posteriori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["rho"]:
            Final rho vector (length M).
        extra["rho_v"]:
            Final rho_v vector (length M+1).
        extra["xi_half"]:
            Final xi_half scalar.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        sqrt_lam = float(np.sqrt(self.lam))

        for k in range(n_samples):
            # Update xi_half (sqrt energy)
            xi_sq = float(self.xi_half**2)
            xi_sq = float(self.lam * xi_sq + (np.abs(x_in[k]) ** 2))
            self.xi_half = self._safe_sqrt(xi_sq)

            # Normalize the input by the energy estimate; clip to the unit
            # circle to keep the normalized variable magnitude-bounded by 1.
            denom_x = float(self.xi_half + self.epsilon)
            bar_f = x_in[k] / denom_x

            abs_bf = np.abs(bar_f)
            if abs_bf > 1.0:
                bar_f = bar_f / abs_bf

            bar_b_curr = np.zeros(self.n_sections + 1, dtype=complex)
            bar_b_curr[0] = bar_f

            # -------------------------
            # Prediction stage
            # -------------------------
            for m in range(self.n_sections):
                # cos terms: sqrt(1 - |·|^2) of the normalized errors; the
                # _safe_sqrt guard absorbs rounding slightly above 1.
                cos_f = self._safe_sqrt(1.0 - (np.abs(bar_f) ** 2))
                cos_b_prev = self._safe_sqrt(1.0 - (np.abs(self.bar_b_prev[m]) ** 2))

                self.rho[m] = (
                    (sqrt_lam * cos_f * cos_b_prev * self.rho[m])
                    + (np.conj(bar_f) * self.bar_b_prev[m])
                )

                # Re-normalize rho back inside the unit circle if it escaped.
                abs_rho = np.abs(self.rho[m])
                if abs_rho >= 1.0:
                    self.rho[m] = self.rho[m] / (abs_rho + self.epsilon)

                cos_rho = self._safe_sqrt(1.0 - (np.abs(self.rho[m]) ** 2))

                denom_f = float((cos_rho * cos_b_prev) + self.epsilon)
                denom_b = float((cos_rho * cos_f) + self.epsilon)

                # Normalized order-updates of forward/backward errors.
                f_next = (bar_f - self.rho[m] * self.bar_b_prev[m]) / denom_f
                b_next = (self.bar_b_prev[m] - np.conj(self.rho[m]) * bar_f) / denom_b

                bar_f = f_next
                bar_b_curr[m + 1] = b_next

            # -------------------------
            # Estimation stage
            # -------------------------
            # Normalize the desired sample the same way as the input.
            bar_e = d_in[k] / float(self.xi_half + self.epsilon)
            abs_be = np.abs(bar_e)
            if abs_be > 1.0:
                bar_e = bar_e / abs_be

            for m in range(self.n_sections + 1):
                cos_e = self._safe_sqrt(1.0 - (np.abs(bar_e) ** 2))
                cos_b = self._safe_sqrt(1.0 - (np.abs(bar_b_curr[m]) ** 2))

                self.rho_v[m] = (
                    (sqrt_lam * cos_e * cos_b * self.rho_v[m])
                    + (np.conj(bar_e) * bar_b_curr[m])
                )

                abs_rv = np.abs(self.rho_v[m])
                if abs_rv >= 1.0:
                    self.rho_v[m] = self.rho_v[m] / (abs_rv + self.epsilon)

                cos_rho_v = self._safe_sqrt(1.0 - (np.abs(self.rho_v[m]) ** 2))

                denom_e = float((cos_rho_v * cos_b) + self.epsilon)
                bar_e = (bar_e - self.rho_v[m] * bar_b_curr[m]) / denom_e

            # De-normalize the final error; y[k] follows from d - e.
            errors[k] = bar_e * self.xi_half
            outputs[k] = d_in[k] - errors[k]

            self.bar_b_prev = bar_b_curr.copy()

            # Mirror rho_v into the base-class API each iteration.
            self.w = self.rho_v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[NormalizedLRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "rho": self.rho.copy(),
                "rho_v": self.rho_v.copy(),
                "xi_half": self.xi_half,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Normalized Lattice RLS (NLRLS) algorithm based on a posteriori error.
Implements Algorithm 7.6 (Diniz). The goal of the normalized lattice recursion is improved numerical robustness: internal normalized variables (errors and reflection-like coefficients) are designed to be magnitude-bounded by 1.
Library conventions
- Complex-valued implementation (supports_complex=True).
- For API consistency, we expose rho_v (length M+1) as the "coefficient vector":
- self.w mirrors self.rho_v
- self.w_history stores rho_v trajectories
- optimize returns OptimizationResult with coefficients stacked from w_history
46 def __init__( 47 self, 48 filter_order: int, 49 lambda_factor: float = 0.99, 50 epsilon: float = 1e-6, 51 w_init: Optional[Union[np.ndarray, list]] = None, 52 denom_floor: float = 1e-12, 53 ) -> None: 54 """ 55 Parameters 56 ---------- 57 filter_order: 58 Number of lattice sections M. The estimation stage uses M+1 coefficients. 59 lambda_factor: 60 Forgetting factor λ. 61 epsilon: 62 Regularization used in normalizations and clipping. 63 w_init: 64 Optional initialization for rho_v (length M+1). If None, zeros. 65 denom_floor: 66 Extra floor for denominators / sqrt protections. 67 """ 68 super().__init__(filter_order=filter_order, w_init=w_init) 69 70 self.lam = float(lambda_factor) 71 self.epsilon = float(epsilon) 72 self.n_sections = int(filter_order) 73 self._tiny = float(denom_floor) 74 75 self.rho = np.zeros(self.n_sections, dtype=complex) 76 77 if w_init is not None: 78 rho_v0 = np.asarray(w_init, dtype=complex).reshape(-1) 79 if rho_v0.size != self.n_sections + 1: 80 raise ValueError( 81 f"w_init must have length {self.n_sections + 1}, got {rho_v0.size}" 82 ) 83 self.rho_v = rho_v0 84 else: 85 self.rho_v = np.zeros(self.n_sections + 1, dtype=complex) 86 87 self.bar_b_prev = np.zeros(self.n_sections + 1, dtype=complex) 88 self.xi_half = float(np.sqrt(self.epsilon)) 89 90 self.w = self.rho_v.copy() 91 self.w_history = [] 92 self._record_history()
Parameters
filter_order: Number of lattice sections M. The estimation stage uses M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Regularization used in normalizations and clipping. w_init: Optional initialization for rho_v (length M+1). If None, zeros. denom_floor: Extra floor for denominators / sqrt protections.
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Runs the Normalized LRLS adaptation (Diniz, Algorithm 7.6).

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns final internal state in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                A posteriori error e[k].
            coefficients:
                History of rho_v (stacked from w_history).
            error_type:
                "a_posteriori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["rho"]:
            Final rho vector (length M).
        extra["rho_v"]:
            Final rho_v vector (length M+1).
        extra["xi_half"]:
            Final xi_half scalar.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        sqrt_lam = float(np.sqrt(self.lam))

        for k in range(n_samples):
            # Update xi_half (sqrt energy): xi^2 <- lam * xi^2 + |x[k]|^2,
            # i.e. an exponentially-weighted input-energy estimate.
            xi_sq = float(self.xi_half**2)
            xi_sq = float(self.lam * xi_sq + (np.abs(x_in[k]) ** 2))
            self.xi_half = self._safe_sqrt(xi_sq)

            # Normalized forward error seed; epsilon guards the division.
            denom_x = float(self.xi_half + self.epsilon)
            bar_f = x_in[k] / denom_x

            # Clip to the unit disc — normalized variables are designed to
            # stay magnitude-bounded by 1 (see class docstring).
            abs_bf = np.abs(bar_f)
            if abs_bf > 1.0:
                bar_f = bar_f / abs_bf

            bar_b_curr = np.zeros(self.n_sections + 1, dtype=complex)
            bar_b_curr[0] = bar_f

            # -------------------------
            # Prediction stage
            # -------------------------
            for m in range(self.n_sections):
                # "cos" factors: sqrt(1 - |·|^2) of the bounded variables.
                cos_f = self._safe_sqrt(1.0 - (np.abs(bar_f) ** 2))
                cos_b_prev = self._safe_sqrt(1.0 - (np.abs(self.bar_b_prev[m]) ** 2))

                # Time-update of the normalized reflection coefficient.
                self.rho[m] = (
                    (sqrt_lam * cos_f * cos_b_prev * self.rho[m])
                    + (np.conj(bar_f) * self.bar_b_prev[m])
                )

                # Keep |rho| < 1 so cos_rho below stays real and positive.
                abs_rho = np.abs(self.rho[m])
                if abs_rho >= 1.0:
                    self.rho[m] = self.rho[m] / (abs_rho + self.epsilon)

                cos_rho = self._safe_sqrt(1.0 - (np.abs(self.rho[m]) ** 2))

                denom_f = float((cos_rho * cos_b_prev) + self.epsilon)
                denom_b = float((cos_rho * cos_f) + self.epsilon)

                # Order-update of the normalized forward/backward errors.
                f_next = (bar_f - self.rho[m] * self.bar_b_prev[m]) / denom_f
                b_next = (self.bar_b_prev[m] - np.conj(self.rho[m]) * bar_f) / denom_b

                bar_f = f_next
                bar_b_curr[m + 1] = b_next

            # -------------------------
            # Estimation stage
            # -------------------------
            # Normalized joint-process error seed from the desired signal.
            bar_e = d_in[k] / float(self.xi_half + self.epsilon)
            abs_be = np.abs(bar_e)
            if abs_be > 1.0:
                bar_e = bar_e / abs_be

            for m in range(self.n_sections + 1):
                cos_e = self._safe_sqrt(1.0 - (np.abs(bar_e) ** 2))
                cos_b = self._safe_sqrt(1.0 - (np.abs(bar_b_curr[m]) ** 2))

                # Time-update of the normalized ladder coefficient rho_v[m].
                self.rho_v[m] = (
                    (sqrt_lam * cos_e * cos_b * self.rho_v[m])
                    + (np.conj(bar_e) * bar_b_curr[m])
                )

                abs_rv = np.abs(self.rho_v[m])
                if abs_rv >= 1.0:
                    self.rho_v[m] = self.rho_v[m] / (abs_rv + self.epsilon)

                cos_rho_v = self._safe_sqrt(1.0 - (np.abs(self.rho_v[m]) ** 2))

                denom_e = float((cos_rho_v * cos_b) + self.epsilon)
                bar_e = (bar_e - self.rho_v[m] * bar_b_curr[m]) / denom_e

            # De-normalize: scale the final normalized error back by the
            # energy estimate. NOTE(review): this uses only xi_half as the
            # de-normalization factor — confirm against Alg. 7.6's exact
            # de-normalization chain.
            errors[k] = bar_e * self.xi_half
            outputs[k] = d_in[k] - errors[k]

            self.bar_b_prev = bar_b_curr.copy()

            # Mirror rho_v into w and record per-sample (library convention).
            self.w = self.rho_v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[NormalizedLRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "rho": self.rho.copy(),
                "rho_v": self.rho_v.copy(),
                "xi_half": self.xi_half,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Runs the Normalized LRLS adaptation.
Returns
OptimizationResult outputs: Estimated output y[k]. errors: A posteriori error e[k]. coefficients: History of rho_v (stacked from w_history). error_type: "a_posteriori".
Extra (when return_internal_states=True)
extra["rho"]: Final rho vector (length M). extra["rho_v"]: Final rho_v vector (length M+1). extra["xi_half"]: Final xi_half scalar.
class FastRLS(AdaptiveFilter):
    """
    Implements the Fast Transversal RLS algorithm for complex-valued data.

    This is a supervised algorithm, i.e., it requires both input_signal and desired_signal.
    """
    supports_complex: bool = True

    forgetting_factor: float
    epsilon: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1.
        forgetting_factor:
            Forgetting factor (lambda), typically close to 1.
        epsilon:
            Regularization / initial prediction error energy (positive).
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)
        self.forgetting_factor = float(forgetting_factor)
        self.epsilon = float(epsilon)
        self.n_coeffs = int(filter_order + 1)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Fast Transversal RLS algorithm.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns additional internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                A-priori output y[k].
            errors:
                A-priori error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_priori".

        Extra (always)
        -------------
        extra["outputs_posteriori"]:
            A-posteriori output sequence.
        extra["errors_posteriori"]:
            A-posteriori error sequence.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["gamma"]:
            Conversion factor trajectory.
        extra["xi_min_f"]:
            Forward prediction minimum error energy trajectory.
        """
        tic: float = time()

        # Fix: force complex dtype to honor supports_complex=True (same
        # convention as the NLRLS implementation in this package). The
        # previous plain np.asarray kept float64 buffers for real inputs,
        # and writing the complex FTRLS updates (e.g. phi_gain below) into
        # them raised TypeError.
        x: np.ndarray = np.asarray(input_signal, dtype=complex)
        d: np.ndarray = np.asarray(desired_signal, dtype=complex)

        n_samples: int = int(x.size)
        m_plus_1: int = int(self.filter_order + 1)

        outputs: np.ndarray = np.zeros(n_samples, dtype=x.dtype)
        errors: np.ndarray = np.zeros(n_samples, dtype=x.dtype)
        outputs_post: np.ndarray = np.zeros(n_samples, dtype=x.dtype)
        errors_post: np.ndarray = np.zeros(n_samples, dtype=x.dtype)

        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None

        # Forward/backward predictors and the normalized gain vector.
        w_f: np.ndarray = np.zeros(m_plus_1, dtype=x.dtype)
        w_b: np.ndarray = np.zeros(m_plus_1, dtype=x.dtype)
        phi_hat_n: np.ndarray = np.zeros(m_plus_1, dtype=x.dtype)

        gamma_n: float = 1.0
        xi_min_f_prev: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)

        # Zero-prefix so early regressors are defined (prewindowed data).
        x_padded: np.ndarray = np.zeros(n_samples + m_plus_1, dtype=x.dtype)
        x_padded[m_plus_1:] = x

        for k in range(n_samples):
            # Regressor of length M+2: newest sample first.
            regressor: np.ndarray = x_padded[k : k + m_plus_1 + 1][::-1]

            # --- Forward prediction ---
            e_f_priori: complex = complex(regressor[0] - np.dot(w_f.conj(), regressor[1:]))
            e_f_post: complex = complex(e_f_priori * gamma_n)

            xi_min_f_curr: float = float(
                self.forgetting_factor * xi_min_f_prev + np.real(e_f_priori * np.conj(e_f_post))
            )

            phi_gain: complex = complex(e_f_priori / (self.forgetting_factor * xi_min_f_prev))

            # Order-extend the gain vector from N to N+1.
            phi_hat_n_plus_1: np.ndarray = np.zeros(m_plus_1 + 1, dtype=x.dtype)
            phi_hat_n_plus_1[1:] = phi_hat_n
            phi_hat_n_plus_1[0] += phi_gain
            phi_hat_n_plus_1[1:] -= phi_gain * w_f

            w_f = w_f + phi_hat_n * np.conj(e_f_post)

            gamma_n_plus_1: float = float((self.forgetting_factor * xi_min_f_prev * gamma_n) / xi_min_f_curr)

            # --- Backward prediction ---
            e_b_priori: complex = complex(self.forgetting_factor * xi_min_b * phi_hat_n_plus_1[-1])

            # Order-reduce the conversion factor; tiny additive constant
            # guards the division.
            gamma_n = float(
                1.0 / (np.real(1.0 / gamma_n_plus_1 - (phi_hat_n_plus_1[-1] * np.conj(e_b_priori))) + 1e-30)
            )

            e_b_post: complex = complex(e_b_priori * gamma_n)
            xi_min_b = float(self.forgetting_factor * xi_min_b + np.real(e_b_post * np.conj(e_b_priori)))

            phi_hat_n = phi_hat_n_plus_1[:-1] + phi_hat_n_plus_1[-1] * w_b
            w_b = w_b + phi_hat_n * np.conj(e_b_post)

            # --- Joint-process estimation ---
            y_k: complex = complex(np.dot(self.w.conj(), regressor[:-1]))
            outputs[k] = y_k
            errors[k] = d[k] - outputs[k]

            errors_post[k] = errors[k] * gamma_n
            outputs_post[k] = d[k] - errors_post[k]

            self.w = self.w + phi_hat_n * np.conj(errors_post[k])

            if return_internal_states and gamma_track is not None and xi_f_track is not None:
                gamma_track[k] = gamma_n
                xi_f_track[k] = xi_min_f_curr

            xi_min_f_prev = xi_min_f_curr
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[FastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "outputs_posteriori": outputs_post,
            "errors_posteriori": errors_post,
        }
        if return_internal_states:
            extra.update(
                {
                    "gamma": gamma_track,
                    "xi_min_f": xi_f_track,
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Implements the Fast Transversal RLS algorithm for complex-valued data.
This is a supervised algorithm, i.e., it requires both input_signal and desired_signal.
37 def __init__( 38 self, 39 filter_order: int, 40 forgetting_factor: float = 0.99, 41 epsilon: float = 0.1, 42 w_init: Optional[Union[np.ndarray, list]] = None, 43 ) -> None: 44 """ 45 Parameters 46 ---------- 47 filter_order: 48 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 49 forgetting_factor: 50 Forgetting factor (lambda), typically close to 1. 51 epsilon: 52 Regularization / initial prediction error energy (positive). 53 w_init: 54 Optional initial coefficient vector. If None, initializes to zeros. 55 """ 56 super().__init__(filter_order=filter_order, w_init=w_init) 57 self.forgetting_factor = float(forgetting_factor) 58 self.epsilon = float(epsilon) 59 self.n_coeffs = int(filter_order + 1)
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. forgetting_factor: Forgetting factor (lambda), typically close to 1. epsilon: Regularization / initial prediction error energy (positive). w_init: Optional initial coefficient vector. If None, initializes to zeros.
61 @validate_input 62 def optimize( 63 self, 64 input_signal: np.ndarray, 65 desired_signal: np.ndarray, 66 verbose: bool = False, 67 return_internal_states: bool = False, 68 ) -> OptimizationResult: 69 """ 70 Executes the Fast Transversal RLS algorithm. 71 72 Parameters 73 ---------- 74 input_signal: 75 Input signal x[k]. 76 desired_signal: 77 Desired signal d[k]. 78 verbose: 79 If True, prints runtime. 80 return_internal_states: 81 If True, returns additional internal trajectories in result.extra. 82 83 Returns 84 ------- 85 OptimizationResult 86 outputs: 87 A-priori output y[k]. 88 errors: 89 A-priori error e[k] = d[k] - y[k]. 90 coefficients: 91 History of coefficients stored in the base class. 92 error_type: 93 "a_priori". 94 95 Extra (always) 96 ------------- 97 extra["outputs_posteriori"]: 98 A-posteriori output sequence. 99 extra["errors_posteriori"]: 100 A-posteriori error sequence. 101 102 Extra (when return_internal_states=True) 103 -------------------------------------- 104 extra["gamma"]: 105 Conversion factor trajectory. 106 extra["xi_min_f"]: 107 Forward prediction minimum error energy trajectory. 
108 """ 109 tic: float = time() 110 111 x: np.ndarray = np.asarray(input_signal) 112 d: np.ndarray = np.asarray(desired_signal) 113 114 n_samples: int = int(x.size) 115 m_plus_1: int = int(self.filter_order + 1) 116 117 outputs: np.ndarray = np.zeros(n_samples, dtype=x.dtype) 118 errors: np.ndarray = np.zeros(n_samples, dtype=x.dtype) 119 outputs_post: np.ndarray = np.zeros(n_samples, dtype=x.dtype) 120 errors_post: np.ndarray = np.zeros(n_samples, dtype=x.dtype) 121 122 gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 123 xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 124 125 w_f: np.ndarray = np.zeros(m_plus_1, dtype=x.dtype) 126 w_b: np.ndarray = np.zeros(m_plus_1, dtype=x.dtype) 127 phi_hat_n: np.ndarray = np.zeros(m_plus_1, dtype=x.dtype) 128 129 gamma_n: float = 1.0 130 xi_min_f_prev: float = float(self.epsilon) 131 xi_min_b: float = float(self.epsilon) 132 133 x_padded: np.ndarray = np.zeros(n_samples + m_plus_1, dtype=x.dtype) 134 x_padded[m_plus_1:] = x 135 136 for k in range(n_samples): 137 regressor: np.ndarray = x_padded[k : k + m_plus_1 + 1][::-1] 138 139 e_f_priori: complex = complex(regressor[0] - np.dot(w_f.conj(), regressor[1:])) 140 e_f_post: complex = complex(e_f_priori * gamma_n) 141 142 xi_min_f_curr: float = float( 143 self.forgetting_factor * xi_min_f_prev + np.real(e_f_priori * np.conj(e_f_post)) 144 ) 145 146 phi_gain: complex = complex(e_f_priori / (self.forgetting_factor * xi_min_f_prev)) 147 148 phi_hat_n_plus_1: np.ndarray = np.zeros(m_plus_1 + 1, dtype=x.dtype) 149 phi_hat_n_plus_1[1:] = phi_hat_n 150 phi_hat_n_plus_1[0] += phi_gain 151 phi_hat_n_plus_1[1:] -= phi_gain * w_f 152 153 w_f = w_f + phi_hat_n * np.conj(e_f_post) 154 155 gamma_n_plus_1: float = float((self.forgetting_factor * xi_min_f_prev * gamma_n) / xi_min_f_curr) 156 157 e_b_priori: complex = complex(self.forgetting_factor * xi_min_b * phi_hat_n_plus_1[-1]) 
158 159 gamma_n = float( 160 1.0 / (np.real(1.0 / gamma_n_plus_1 - (phi_hat_n_plus_1[-1] * np.conj(e_b_priori))) + 1e-30) 161 ) 162 163 e_b_post: complex = complex(e_b_priori * gamma_n) 164 xi_min_b = float(self.forgetting_factor * xi_min_b + np.real(e_b_post * np.conj(e_b_priori))) 165 166 phi_hat_n = phi_hat_n_plus_1[:-1] + phi_hat_n_plus_1[-1] * w_b 167 w_b = w_b + phi_hat_n * np.conj(e_b_post) 168 169 y_k: complex = complex(np.dot(self.w.conj(), regressor[:-1])) 170 outputs[k] = y_k 171 errors[k] = d[k] - outputs[k] 172 173 errors_post[k] = errors[k] * gamma_n 174 outputs_post[k] = d[k] - errors_post[k] 175 176 self.w = self.w + phi_hat_n * np.conj(errors_post[k]) 177 178 if return_internal_states and gamma_track is not None and xi_f_track is not None: 179 gamma_track[k] = gamma_n 180 xi_f_track[k] = xi_min_f_curr 181 182 xi_min_f_prev = xi_min_f_curr 183 self._record_history() 184 185 runtime_s: float = float(time() - tic) 186 if verbose: 187 print(f"[FastRLS] Completed in {runtime_s * 1000:.02f} ms") 188 189 extra: Dict[str, Any] = { 190 "outputs_posteriori": outputs_post, 191 "errors_posteriori": errors_post, 192 } 193 if return_internal_states: 194 extra.update( 195 { 196 "gamma": gamma_track, 197 "xi_min_f": xi_f_track, 198 } 199 ) 200 201 return self._pack_results( 202 outputs=outputs, 203 errors=errors, 204 runtime_s=runtime_s, 205 error_type="a_priori", 206 extra=extra, 207 )
Executes the Fast Transversal RLS algorithm.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns additional internal trajectories in result.extra.
Returns
OptimizationResult outputs: A-priori output y[k]. errors: A-priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
Extra (always)
extra["outputs_posteriori"]: A-posteriori output sequence. extra["errors_posteriori"]: A-posteriori error sequence.
Extra (when return_internal_states=True)
extra["gamma"]: Conversion factor trajectory. extra["xi_min_f"]: Forward prediction minimum error energy trajectory.
class StabFastRLS(AdaptiveFilter):
    """
    Implements the Stabilized Fast Transversal RLS algorithm for real-valued data.

    Stabilization combines redundant computations of the backward a-priori
    error (weighted by kappa1/kappa2/kappa3) with floored denominators and
    energies, trading a small amount of extra work for numerical robustness.
    """
    supports_complex: bool = False

    # Forgetting factor lambda.
    lambda_: float
    # Initial prediction-error energy / regularization.
    epsilon: float
    # Stabilization mixing weights for the backward error.
    kappa1: float
    kappa2: float
    kappa3: float
    # Floors used by the safe-inversion helpers.
    denom_floor: float
    xi_floor: float
    # Optional symmetric clipping bound for gamma (None disables it).
    gamma_clip: Optional[float]
    # Number of filter coefficients (filter_order + 1).
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 1e-1,
        kappa1: float = 1.5,
        kappa2: float = 2.5,
        kappa3: float = 1.0,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: Optional[float] = None,
        xi_floor: Optional[float] = None,
        gamma_clip: Optional[float] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1.
        forgetting_factor:
            Forgetting factor (lambda), typically close to 1.
        epsilon:
            Regularization / initial prediction error energy (positive).
        kappa1, kappa2, kappa3:
            Stabilization parameters from the stabilized FTRLS formulation.
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        denom_floor:
            Floor for denominators used in safe inversions. If None, a tiny float-based default is used.
        xi_floor:
            Floor for prediction error energies. If None, a tiny float-based default is used.
        gamma_clip:
            Optional clipping threshold for gamma (if provided).
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.filter_order = int(filter_order)
        self.n_coeffs = int(self.filter_order + 1)

        self.lambda_ = float(forgetting_factor)
        self.epsilon = float(epsilon)
        self.kappa1 = float(kappa1)
        self.kappa2 = float(kappa2)
        self.kappa3 = float(kappa3)

        # Default floors are scaled up from the smallest normal float64 so
        # clamped inversions stay finite.
        finfo = np.finfo(np.float64)
        self.denom_floor = float(denom_floor) if denom_floor is not None else float(finfo.tiny * 1e3)
        self.xi_floor = float(xi_floor) if xi_floor is not None else float(finfo.tiny * 1e6)
        self.gamma_clip = float(gamma_clip) if gamma_clip is not None else None

        # This implementation is real-valued only (supports_complex=False).
        self.w = np.asarray(self.w, dtype=np.float64)

    @staticmethod
    def _clamp_denom(den: float, floor: float) -> float:
        # Replace non-finite or too-small denominators by +/-floor,
        # preserving the sign (zero is treated as positive).
        if (not np.isfinite(den)) or (abs(den) < floor):
            return float(np.copysign(floor, den if den != 0 else 1.0))
        return float(den)

    def _safe_inv(self, den: float, floor: float, clamp_counter: Dict[str, int], key: str) -> float:
        # Inverts `den` after clamping; counts each clamp under `key` so
        # callers can report how often stabilization kicked in.
        den_clamped = self._clamp_denom(den, floor)
        if den_clamped != den:
            clamp_counter[key] = clamp_counter.get(key, 0) + 1
        return 1.0 / den_clamped

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Stabilized Fast Transversal RLS algorithm.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns internal trajectories and clamping stats in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                A-priori output y[k].
            errors:
                A-priori error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_priori".

        Extra (always)
        -------------
        extra["errors_posteriori"]:
            A-posteriori error sequence e_post[k] = gamma[k] * e[k].
        extra["clamp_stats"]:
            Dictionary with counters of how many times each denominator was clamped.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_min_f"]:
            Forward prediction error energy trajectory.
        extra["xi_min_b"]:
            Backward prediction error energy trajectory.
        extra["gamma"]:
            Conversion factor trajectory.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)
        n_taps: int = int(self.filter_order + 1)
        reg_len: int = int(self.filter_order + 2)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        xi_min_f: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)
        gamma_n_3: float = 1.0

        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        xi_b_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Forward/backward predictors and gain vectors of order N and N+1.
        w_f: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        w_b: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        phi_hat_n: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        phi_hat_np1: np.ndarray = np.zeros(reg_len, dtype=np.float64)

        # Zero-prefix so early regressors are defined (prewindowed data).
        x_padded: np.ndarray = np.zeros(n_samples + n_taps, dtype=np.float64)
        x_padded[n_taps:] = x

        clamp_counter: Dict[str, int] = {}

        for k in range(n_samples):
            # Regressor of length M+2, newest sample first.
            r: np.ndarray = x_padded[k : k + reg_len][::-1]

            # --- Forward prediction ---
            e_f_priori: float = float(r[0] - np.dot(w_f, r[1:]))
            e_f_post: float = float(e_f_priori * gamma_n_3)

            # Order-extend the gain vector using the forward predictor.
            scale: float = self._safe_inv(self.lambda_ * xi_min_f, self.denom_floor, clamp_counter, "inv_lam_xi_f")
            phi_hat_np1[0] = scale * e_f_priori
            phi_hat_np1[1:] = phi_hat_n - phi_hat_np1[0] * w_f

            inv_g3: float = self._safe_inv(gamma_n_3, self.denom_floor, clamp_counter, "inv_g3")
            gamma_np1_1: float = self._safe_inv(
                inv_g3 + phi_hat_np1[0] * e_f_priori, self.denom_floor, clamp_counter, "inv_g_np1"
            )

            if self.gamma_clip is not None:
                gamma_np1_1 = float(np.clip(gamma_np1_1, -self.gamma_clip, self.gamma_clip))

            # Update forward error energy through its inverse; keep it above
            # xi_floor so later inversions stay well-posed.
            inv_xi_f_lam: float = self._safe_inv(
                xi_min_f * self.lambda_, self.denom_floor, clamp_counter, "inv_xi_f"
            )
            xi_min_f = max(
                self._safe_inv(
                    inv_xi_f_lam - gamma_np1_1 * (phi_hat_np1[0] ** 2),
                    self.denom_floor,
                    clamp_counter,
                    "inv_den_xi_f",
                ),
                self.xi_floor,
            )

            w_f += phi_hat_n * e_f_post

            # --- Backward prediction (stabilized) ---
            # Two redundant computations of the backward a-priori error...
            e_b_line1: float = float(self.lambda_ * xi_min_b * phi_hat_np1[-1])
            e_b_line2: float = float(r[-1] - np.dot(w_b, r[:-1]))

            # ...mixed with three different weights; each mix feeds a
            # different recursion (coefficients / energy / conversion factor).
            eb3_1: float = float(e_b_line2 * self.kappa1 + e_b_line1 * (1.0 - self.kappa1))
            eb3_2: float = float(e_b_line2 * self.kappa2 + e_b_line1 * (1.0 - self.kappa2))
            eb3_3: float = float(e_b_line2 * self.kappa3 + e_b_line1 * (1.0 - self.kappa3))

            inv_g_np1_1: float = self._safe_inv(gamma_np1_1, self.denom_floor, clamp_counter, "inv_g_np1_1")
            gamma_n_2: float = self._safe_inv(
                inv_g_np1_1 - phi_hat_np1[-1] * eb3_3, self.denom_floor, clamp_counter, "inv_g_n2"
            )

            xi_min_b = max(
                float(self.lambda_ * xi_min_b + (eb3_2 * gamma_n_2) * eb3_2),
                self.xi_floor,
            )

            # Order-reduce the gain vector and update the backward predictor.
            phi_hat_n = phi_hat_np1[:-1] + phi_hat_np1[-1] * w_b
            w_b += phi_hat_n * (eb3_1 * gamma_n_2)

            gamma_n_3 = self._safe_inv(
                1.0 + float(np.dot(phi_hat_n, r[:-1])),
                self.denom_floor,
                clamp_counter,
                "inv_g_n3",
            )

            if return_internal_states and xi_f_track is not None and xi_b_track is not None and gamma_track is not None:
                xi_f_track[k] = xi_min_f
                xi_b_track[k] = xi_min_b
                gamma_track[k] = gamma_n_3

            # --- Joint-process estimation ---
            y_k: float = float(np.dot(self.w, r[:-1]))
            outputs[k] = y_k

            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            e_post_k: float = float(e_k * gamma_n_3)
            errors_post[k] = e_post_k

            self.w += phi_hat_n * e_post_k
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[StabFastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "errors_posteriori": errors_post,
            "clamp_stats": clamp_counter,
        }
        if return_internal_states:
            extra.update(
                {
                    "xi_min_f": xi_f_track,
                    "xi_min_b": xi_b_track,
                    "gamma": gamma_track,
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Implements the Stabilized Fast Transversal RLS algorithm for real-valued data.
42 def __init__( 43 self, 44 filter_order: int, 45 forgetting_factor: float = 0.99, 46 epsilon: float = 1e-1, 47 kappa1: float = 1.5, 48 kappa2: float = 2.5, 49 kappa3: float = 1.0, 50 w_init: Optional[Union[np.ndarray, list]] = None, 51 denom_floor: Optional[float] = None, 52 xi_floor: Optional[float] = None, 53 gamma_clip: Optional[float] = None, 54 ) -> None: 55 """ 56 Parameters 57 ---------- 58 filter_order: 59 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 60 forgetting_factor: 61 Forgetting factor (lambda), typically close to 1. 62 epsilon: 63 Regularization / initial prediction error energy (positive). 64 kappa1, kappa2, kappa3: 65 Stabilization parameters from the stabilized FTRLS formulation. 66 w_init: 67 Optional initial coefficient vector. If None, initializes to zeros. 68 denom_floor: 69 Floor for denominators used in safe inversions. If None, a tiny float-based default is used. 70 xi_floor: 71 Floor for prediction error energies. If None, a tiny float-based default is used. 72 gamma_clip: 73 Optional clipping threshold for gamma (if provided). 74 """ 75 super().__init__(filter_order=filter_order, w_init=w_init) 76 77 self.filter_order = int(filter_order) 78 self.n_coeffs = int(self.filter_order + 1) 79 80 self.lambda_ = float(forgetting_factor) 81 self.epsilon = float(epsilon) 82 self.kappa1 = float(kappa1) 83 self.kappa2 = float(kappa2) 84 self.kappa3 = float(kappa3) 85 86 finfo = np.finfo(np.float64) 87 self.denom_floor = float(denom_floor) if denom_floor is not None else float(finfo.tiny * 1e3) 88 self.xi_floor = float(xi_floor) if xi_floor is not None else float(finfo.tiny * 1e6) 89 self.gamma_clip = float(gamma_clip) if gamma_clip is not None else None 90 91 self.w = np.asarray(self.w, dtype=np.float64)
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. forgetting_factor: Forgetting factor (lambda), typically close to 1. epsilon: Regularization / initial prediction error energy (positive). kappa1, kappa2, kappa3: Stabilization parameters from the stabilized FTRLS formulation. w_init: Optional initial coefficient vector. If None, initializes to zeros. denom_floor: Floor for denominators used in safe inversions. If None, a tiny float-based default is used. xi_floor: Floor for prediction error energies. If None, a tiny float-based default is used. gamma_clip: Optional clipping threshold for gamma (if provided).
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Stabilized Fast Transversal RLS algorithm.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns internal trajectories and clamping stats in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                A-priori output y[k].
            errors:
                A-priori error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_priori".

        Extra (always)
        -------------
        extra["errors_posteriori"]:
            A-posteriori error sequence e_post[k] = gamma[k] * e[k].
        extra["clamp_stats"]:
            Dictionary with counters of how many times each denominator was clamped.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["xi_min_f"]:
            Forward prediction error energy trajectory.
        extra["xi_min_b"]:
            Backward prediction error energy trajectory.
        extra["gamma"]:
            Conversion factor trajectory.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)
        n_taps: int = int(self.filter_order + 1)
        reg_len: int = int(self.filter_order + 2)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        xi_min_f: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)
        gamma_n_3: float = 1.0

        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        xi_b_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Forward/backward predictors and gain vectors of order N and N+1.
        w_f: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        w_b: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        phi_hat_n: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        phi_hat_np1: np.ndarray = np.zeros(reg_len, dtype=np.float64)

        # Zero-prefix so early regressors are defined (prewindowed data).
        x_padded: np.ndarray = np.zeros(n_samples + n_taps, dtype=np.float64)
        x_padded[n_taps:] = x

        clamp_counter: Dict[str, int] = {}

        for k in range(n_samples):
            # Regressor of length M+2, newest sample first.
            r: np.ndarray = x_padded[k : k + reg_len][::-1]

            # --- Forward prediction ---
            e_f_priori: float = float(r[0] - np.dot(w_f, r[1:]))
            e_f_post: float = float(e_f_priori * gamma_n_3)

            # Order-extend the gain vector using the forward predictor.
            scale: float = self._safe_inv(self.lambda_ * xi_min_f, self.denom_floor, clamp_counter, "inv_lam_xi_f")
            phi_hat_np1[0] = scale * e_f_priori
            phi_hat_np1[1:] = phi_hat_n - phi_hat_np1[0] * w_f

            inv_g3: float = self._safe_inv(gamma_n_3, self.denom_floor, clamp_counter, "inv_g3")
            gamma_np1_1: float = self._safe_inv(
                inv_g3 + phi_hat_np1[0] * e_f_priori, self.denom_floor, clamp_counter, "inv_g_np1"
            )

            if self.gamma_clip is not None:
                gamma_np1_1 = float(np.clip(gamma_np1_1, -self.gamma_clip, self.gamma_clip))

            # Forward energy updated via its inverse, floored by xi_floor so
            # later inversions stay well-posed.
            inv_xi_f_lam: float = self._safe_inv(
                xi_min_f * self.lambda_, self.denom_floor, clamp_counter, "inv_xi_f"
            )
            xi_min_f = max(
                self._safe_inv(
                    inv_xi_f_lam - gamma_np1_1 * (phi_hat_np1[0] ** 2),
                    self.denom_floor,
                    clamp_counter,
                    "inv_den_xi_f",
                ),
                self.xi_floor,
            )

            w_f += phi_hat_n * e_f_post

            # --- Backward prediction (stabilized) ---
            # Two redundant computations of the backward a-priori error...
            e_b_line1: float = float(self.lambda_ * xi_min_b * phi_hat_np1[-1])
            e_b_line2: float = float(r[-1] - np.dot(w_b, r[:-1]))

            # ...mixed with three different kappa weights; each mix feeds a
            # different recursion (coefficients / energy / conversion factor).
            eb3_1: float = float(e_b_line2 * self.kappa1 + e_b_line1 * (1.0 - self.kappa1))
            eb3_2: float = float(e_b_line2 * self.kappa2 + e_b_line1 * (1.0 - self.kappa2))
            eb3_3: float = float(e_b_line2 * self.kappa3 + e_b_line1 * (1.0 - self.kappa3))

            inv_g_np1_1: float = self._safe_inv(gamma_np1_1, self.denom_floor, clamp_counter, "inv_g_np1_1")
            gamma_n_2: float = self._safe_inv(
                inv_g_np1_1 - phi_hat_np1[-1] * eb3_3, self.denom_floor, clamp_counter, "inv_g_n2"
            )

            xi_min_b = max(
                float(self.lambda_ * xi_min_b + (eb3_2 * gamma_n_2) * eb3_2),
                self.xi_floor,
            )

            # Order-reduce the gain vector and update the backward predictor.
            phi_hat_n = phi_hat_np1[:-1] + phi_hat_np1[-1] * w_b
            w_b += phi_hat_n * (eb3_1 * gamma_n_2)

            gamma_n_3 = self._safe_inv(
                1.0 + float(np.dot(phi_hat_n, r[:-1])),
                self.denom_floor,
                clamp_counter,
                "inv_g_n3",
            )

            if return_internal_states and xi_f_track is not None and xi_b_track is not None and gamma_track is not None:
                xi_f_track[k] = xi_min_f
                xi_b_track[k] = xi_min_b
                gamma_track[k] = gamma_n_3

            # --- Joint-process estimation ---
            y_k: float = float(np.dot(self.w, r[:-1]))
            outputs[k] = y_k

            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            e_post_k: float = float(e_k * gamma_n_3)
            errors_post[k] = e_post_k

            self.w += phi_hat_n * e_post_k
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[StabFastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "errors_posteriori": errors_post,
            "clamp_stats": clamp_counter,
        }
        if return_internal_states:
            extra.update(
                {
                    "xi_min_f": xi_f_track,
                    "xi_min_b": xi_b_track,
                    "gamma": gamma_track,
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Executes the Stabilized Fast Transversal RLS algorithm.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns internal trajectories and clamping stats in result.extra.
Returns
OptimizationResult outputs: A-priori output y[k]. errors: A-priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "a_priori".
Extra (always)
extra["errors_posteriori"]: A-posteriori error sequence e_post[k] = gamma[k] * e[k]. extra["clamp_stats"]: Dictionary with counters of how many times each denominator was clamped.
Extra (when return_internal_states=True)
extra["xi_min_f"]: Forward prediction error energy trajectory. extra["xi_min_b"]: Backward prediction error energy trajectory. extra["gamma"]: Conversion factor trajectory.
class QRRLS(AdaptiveFilter):
    """
    QR-RLS (real-valued) using Givens rotations.

    Implements Algorithm 9.1 (Diniz, 3rd ed.) in a QR-decomposition framework.
    This version mirrors the provided MATLAB routine (QR_RLS.m) and keeps the
    same internal state variables.

    Key internal state (MATLAB naming)
    ---------------------------------
    ULineMatrix:
        Square matrix updated by sequential Givens rotations (size n_coeffs x n_coeffs).
    dLine_q2:
        Transformed desired vector (size n_coeffs,).
    gamma:
        Likelihood scalar accumulated as a product of cosines in the Givens steps.

    Notes
    -----
    - Real-valued only (supports_complex=False).
    - The returned `errors` correspond to the MATLAB `errorVector`:
          e[k] = d_line * gamma
      and the output is:
          y[k] = d[k] - e[k]
      Therefore we label `error_type="a_posteriori"` to match the MATLAB-style
      “post-rotation” error quantity.
    """

    supports_complex: bool = False

    # Forgetting factor, derived sizes, QR state and the numerical floor
    # used to protect scalar divisions.
    lamb: float
    n_coeffs: int
    ULineMatrix: np.ndarray
    dLine_q2: np.ndarray
    _tiny: float

    def __init__(
        self,
        filter_order: int,
        lamb: float = 0.99,
        w_init: Optional[ArrayLike] = None,
        *,
        denom_floor: float = 1e-18,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of coefficients is M+1).
        lamb:
            Forgetting factor λ, must satisfy 0 < λ <= 1.
        w_init:
            Optional initial coefficients (length M+1). If None, zeros are used.
        denom_floor:
            Small floor used to avoid division by (near) zero in scalar denominators.

        Raises
        ------
        ValueError
            If `lamb` is outside (0, 1] or `w_init` has the wrong length.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.lamb = float(lamb)
        if not (0.0 < self.lamb <= 1.0):
            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got {self.lamb}.")

        self._tiny = float(denom_floor)

        self.n_coeffs = int(self.filter_order) + 1

        # Force real-valued coefficients regardless of base-class dtype.
        self.w = np.asarray(self.w, dtype=np.float64)

        # Re-apply w_init explicitly so its length is validated against M+1.
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
                )
            self.w = w0.copy()

        # Rotated data matrix and transformed desired vector (MATLAB naming).
        self.ULineMatrix = np.zeros((self.n_coeffs, self.n_coeffs), dtype=np.float64)
        self.dLine_q2 = np.zeros(self.n_coeffs, dtype=np.float64)

        self.w_history = []
        self._record_history()

    @staticmethod
    def _givens_rotate_rows(
        row0: np.ndarray,
        row1: np.ndarray,
        cos_t: float,
        sin_t: float,
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Apply a 2x2 real Givens rotation:

            [ cos  -sin ] [row0]  =  [row0']
            [ sin   cos ] [row1]     [row1']

        Parameters
        ----------
        row0, row1:
            1-D arrays (same length) representing stacked rows.
        cos_t, sin_t:
            Givens rotation cosine and sine.

        Returns
        -------
        (row0_rot, row1_rot):
            Rotated rows.
        """
        new0 = cos_t * row0 - sin_t * row1
        new1 = sin_t * row0 + cos_t * row1
        return new0, new1

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run QR-RLS adaptation over (x[k], d[k]) using the MATLAB-style recursion.

        Parameters
        ----------
        input_signal:
            Input sequence x[k] (real), shape (N,).
        desired_signal:
            Desired sequence d[k] (real), shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes selected internal state in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k] (real).
            errors:
                MATLAB-style error quantity e[k] = d_line * gamma (real).
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_posteriori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["ULineMatrix_last"]:
            Final ULineMatrix.
        extra["dLine_q2_last"]:
            Final dLine_q2.
        extra["gamma_last"]:
            gamma at the last iteration.
        extra["d_line_last"]:
            d_line at the last iteration.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples = int(d.size)
        n = int(self.n_coeffs)
        M = int(self.filter_order)

        # The first n samples are consumed by the initialization phase below.
        if n_samples < n:
            raise ValueError(
                f"QR-RLS needs at least (filter_order+1) samples. "
                f"Got n_samples={n_samples}, filter_order={M} => n_coeffs={n}."
            )

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # Reset QR state so repeated optimize() calls start from scratch.
        self.ULineMatrix.fill(0.0)
        self.dLine_q2.fill(0.0)

        self.w_history = []
        self._record_history()

        # Guard against division by x[0] ~ 0 in the initialization solves.
        denom0 = float(x[0])
        if abs(denom0) < self._tiny:
            denom0 = self._tiny if denom0 >= 0.0 else -self._tiny

        # --- Initialization phase over the first n samples (mirrors QR_RLS.m) ---
        for kt in range(n):
            w_tmp = np.zeros(n, dtype=np.float64)
            w_tmp[0] = float(d[0] / denom0)

            # Sequential solve using x[0] as the pivot, as in the MATLAB code.
            for ct in range(1, kt + 1):
                num = -float(np.dot(x[1 : ct + 1], w_tmp[ct - 1 :: -1])) + float(d[ct])
                w_tmp[ct] = float(num / denom0)

            self.w = w_tmp
            self._record_history()

            # Output of the provisional solution over the available regressor
            # (zero-padded when fewer than M+1 samples exist yet).
            xk = np.zeros(n, dtype=np.float64)
            start = max(0, kt - M)
            seg = x[start : kt + 1][::-1]
            xk[: seg.size] = seg
            outputs[kt] = float(np.dot(w_tmp, xk))

        sqrt_lam = float(np.sqrt(self.lamb))

        # Seed ULineMatrix / dLine_q2 with exponentially weighted initial data.
        for it in range(M + 1):
            scale = float(self.lamb ** ((it + 1) / 2.0))

            vec = x[(n - it - 1) :: -1]
            self.ULineMatrix[it, 0 : (n - it)] = scale * vec

            self.dLine_q2[it] = scale * float(d[n - it - 1])

        gamma_last: float = 1.0
        d_line_last: float = float(d[n - 1])

        # --- Steady-state phase: annihilate one regressor row per sample ---
        for k in range(n, n_samples):
            gamma = 1.0
            d_line = float(d[k])

            # New data row [x[k], ..., x[k-M]] to be rotated into ULineMatrix.
            reg = x[k : k - M - 1 : -1].copy()

            for rt in range(M + 1):
                row_u = rt
                col_u = n - 1 - rt
                idx_r = n - 1 - rt

                u_val = float(self.ULineMatrix[row_u, col_u])
                r_val = float(reg[idx_r])

                # Givens angle that zeroes reg[idx_r] against the U pivot.
                cI = float(np.sqrt(u_val * u_val + r_val * r_val))
                if cI < self._tiny:
                    # Both entries (near) zero: identity rotation.
                    cos_t, sin_t = 1.0, 0.0
                else:
                    cos_t, sin_t = (u_val / cI), (r_val / cI)

                reg, self.ULineMatrix[row_u, :] = self._givens_rotate_rows(
                    reg, self.ULineMatrix[row_u, :], cos_t, sin_t
                )

                # gamma accumulates the product of cosines across rotations.
                gamma *= cos_t

                # Apply the same rotation to the desired-signal pair.
                dq2_rt = float(self.dLine_q2[row_u])
                new_d_line = (cos_t * d_line) - (sin_t * dq2_rt)
                new_dq2_rt = (sin_t * d_line) + (cos_t * dq2_rt)
                d_line = float(new_d_line)
                self.dLine_q2[row_u] = float(new_dq2_rt)

            # Back-substitution on the rotated system to recover the weights.
            d_bar = np.empty(n + 1, dtype=np.float64)
            d_bar[0] = d_line
            d_bar[1:] = self.dLine_q2

            w_new = np.zeros(n, dtype=np.float64)

            den = float(self.ULineMatrix[n - 1, 0])
            if abs(den) < self._tiny:
                den = self._tiny if den >= 0.0 else -self._tiny
            w_new[0] = float(d_bar[n] / den)

            for it in range(1, M + 1):
                row = n - 1 - it
                u_vec = self.ULineMatrix[row, 0:it][::-1]
                w_vec = w_new[0:it][::-1]
                num = -float(np.dot(u_vec, w_vec)) + float(d_bar[n - it])

                den = float(self.ULineMatrix[row, it])
                if abs(den) < self._tiny:
                    den = self._tiny if den >= 0.0 else -self._tiny

                w_new[it] = float(num / den)

            self.w = w_new
            self._record_history()

            # Apply the forgetting factor before processing the next sample.
            self.dLine_q2 *= sqrt_lam
            self.ULineMatrix *= sqrt_lam

            errors[k] = float(d_line * gamma)
            outputs[k] = float(d[k] - errors[k])

            gamma_last = float(gamma)
            d_line_last = float(d_line)

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[QRRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "ULineMatrix_last": self.ULineMatrix.copy(),
                "dLine_q2_last": self.dLine_q2.copy(),
                "gamma_last": gamma_last,
                "d_line_last": d_line_last,
                "forgetting_factor": float(self.lamb),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
QR-RLS (real-valued) using Givens rotations.
Implements Algorithm 9.1 (Diniz, 3rd ed.) in a QR-decomposition framework. This version mirrors the provided MATLAB routine (QR_RLS.m) and keeps the same internal state variables.
Key internal state (MATLAB naming)
ULineMatrix: Square matrix updated by sequential Givens rotations (size n_coeffs x n_coeffs). dLine_q2: Transformed desired vector (size n_coeffs,). gamma: Likelihood scalar accumulated as a product of cosines in the Givens steps.
Notes
- Real-valued only (supports_complex=False).
- The returned `errors` correspond to the MATLAB `errorVector`: e[k] = d_line * gamma, and the output is y[k] = d[k] - e[k]. Therefore we label `error_type="a_posteriori"` to match the MATLAB-style “post-rotation” error quantity.
    def __init__(
        self,
        filter_order: int,
        lamb: float = 0.99,
        w_init: Optional[ArrayLike] = None,
        *,
        denom_floor: float = 1e-18,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR order M (number of coefficients is M+1).
        lamb:
            Forgetting factor λ, must satisfy 0 < λ <= 1.
        w_init:
            Optional initial coefficients (length M+1). If None, zeros are used.
        denom_floor:
            Small floor used to avoid division by (near) zero in scalar denominators.

        Raises
        ------
        ValueError
            If `lamb` is outside (0, 1] or `w_init` has the wrong length.
        """
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.lamb = float(lamb)
        if not (0.0 < self.lamb <= 1.0):
            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got {self.lamb}.")

        self._tiny = float(denom_floor)

        self.n_coeffs = int(self.filter_order) + 1

        # Force real-valued coefficients regardless of base-class dtype.
        self.w = np.asarray(self.w, dtype=np.float64)

        # Re-apply w_init explicitly so its length is validated against M+1.
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
                )
            self.w = w0.copy()

        # Rotated data matrix and transformed desired vector (MATLAB naming).
        self.ULineMatrix = np.zeros((self.n_coeffs, self.n_coeffs), dtype=np.float64)
        self.dLine_q2 = np.zeros(self.n_coeffs, dtype=np.float64)

        self.w_history = []
        self._record_history()
Parameters
filter_order: FIR order M (number of coefficients is M+1). lamb: Forgetting factor λ, must satisfy 0 < λ <= 1. w_init: Optional initial coefficients (length M+1). If None, zeros are used. denom_floor: Small floor used to avoid division by (near) zero in scalar denominators.
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run QR-RLS adaptation over (x[k], d[k]) using the MATLAB-style recursion.

        Parameters
        ----------
        input_signal:
            Input sequence x[k] (real), shape (N,).
        desired_signal:
            Desired sequence d[k] (real), shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes selected internal state in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k] (real).
            errors:
                MATLAB-style error quantity e[k] = d_line * gamma (real).
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_posteriori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["ULineMatrix_last"]:
            Final ULineMatrix.
        extra["dLine_q2_last"]:
            Final dLine_q2.
        extra["gamma_last"]:
            gamma at the last iteration.
        extra["d_line_last"]:
            d_line at the last iteration.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples = int(d.size)
        n = int(self.n_coeffs)
        M = int(self.filter_order)

        # The first n samples are consumed by the initialization phase below.
        if n_samples < n:
            raise ValueError(
                f"QR-RLS needs at least (filter_order+1) samples. "
                f"Got n_samples={n_samples}, filter_order={M} => n_coeffs={n}."
            )

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # Reset QR state so repeated optimize() calls start from scratch.
        self.ULineMatrix.fill(0.0)
        self.dLine_q2.fill(0.0)

        self.w_history = []
        self._record_history()

        # Guard against division by x[0] ~ 0 in the initialization solves.
        denom0 = float(x[0])
        if abs(denom0) < self._tiny:
            denom0 = self._tiny if denom0 >= 0.0 else -self._tiny

        # --- Initialization phase over the first n samples (mirrors QR_RLS.m) ---
        for kt in range(n):
            w_tmp = np.zeros(n, dtype=np.float64)
            w_tmp[0] = float(d[0] / denom0)

            # Sequential solve using x[0] as the pivot, as in the MATLAB code.
            for ct in range(1, kt + 1):
                num = -float(np.dot(x[1 : ct + 1], w_tmp[ct - 1 :: -1])) + float(d[ct])
                w_tmp[ct] = float(num / denom0)

            self.w = w_tmp
            self._record_history()

            # Output of the provisional solution over the available regressor
            # (zero-padded when fewer than M+1 samples exist yet).
            xk = np.zeros(n, dtype=np.float64)
            start = max(0, kt - M)
            seg = x[start : kt + 1][::-1]
            xk[: seg.size] = seg
            outputs[kt] = float(np.dot(w_tmp, xk))

        sqrt_lam = float(np.sqrt(self.lamb))

        # Seed ULineMatrix / dLine_q2 with exponentially weighted initial data.
        for it in range(M + 1):
            scale = float(self.lamb ** ((it + 1) / 2.0))

            vec = x[(n - it - 1) :: -1]
            self.ULineMatrix[it, 0 : (n - it)] = scale * vec

            self.dLine_q2[it] = scale * float(d[n - it - 1])

        gamma_last: float = 1.0
        d_line_last: float = float(d[n - 1])

        # --- Steady-state phase: annihilate one regressor row per sample ---
        for k in range(n, n_samples):
            gamma = 1.0
            d_line = float(d[k])

            # New data row [x[k], ..., x[k-M]] to be rotated into ULineMatrix.
            reg = x[k : k - M - 1 : -1].copy()

            for rt in range(M + 1):
                row_u = rt
                col_u = n - 1 - rt
                idx_r = n - 1 - rt

                u_val = float(self.ULineMatrix[row_u, col_u])
                r_val = float(reg[idx_r])

                # Givens angle that zeroes reg[idx_r] against the U pivot.
                cI = float(np.sqrt(u_val * u_val + r_val * r_val))
                if cI < self._tiny:
                    # Both entries (near) zero: identity rotation.
                    cos_t, sin_t = 1.0, 0.0
                else:
                    cos_t, sin_t = (u_val / cI), (r_val / cI)

                reg, self.ULineMatrix[row_u, :] = self._givens_rotate_rows(
                    reg, self.ULineMatrix[row_u, :], cos_t, sin_t
                )

                # gamma accumulates the product of cosines across rotations.
                gamma *= cos_t

                # Apply the same rotation to the desired-signal pair.
                dq2_rt = float(self.dLine_q2[row_u])
                new_d_line = (cos_t * d_line) - (sin_t * dq2_rt)
                new_dq2_rt = (sin_t * d_line) + (cos_t * dq2_rt)
                d_line = float(new_d_line)
                self.dLine_q2[row_u] = float(new_dq2_rt)

            # Back-substitution on the rotated system to recover the weights.
            d_bar = np.empty(n + 1, dtype=np.float64)
            d_bar[0] = d_line
            d_bar[1:] = self.dLine_q2

            w_new = np.zeros(n, dtype=np.float64)

            den = float(self.ULineMatrix[n - 1, 0])
            if abs(den) < self._tiny:
                den = self._tiny if den >= 0.0 else -self._tiny
            w_new[0] = float(d_bar[n] / den)

            for it in range(1, M + 1):
                row = n - 1 - it
                u_vec = self.ULineMatrix[row, 0:it][::-1]
                w_vec = w_new[0:it][::-1]
                num = -float(np.dot(u_vec, w_vec)) + float(d_bar[n - it])

                den = float(self.ULineMatrix[row, it])
                if abs(den) < self._tiny:
                    den = self._tiny if den >= 0.0 else -self._tiny

                w_new[it] = float(num / den)

            self.w = w_new
            self._record_history()

            # Apply the forgetting factor before processing the next sample.
            self.dLine_q2 *= sqrt_lam
            self.ULineMatrix *= sqrt_lam

            errors[k] = float(d_line * gamma)
            outputs[k] = float(d[k] - errors[k])

            gamma_last = float(gamma)
            d_line_last = float(d_line)

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[QRRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "ULineMatrix_last": self.ULineMatrix.copy(),
                "dLine_q2_last": self.dLine_q2.copy(),
                "gamma_last": gamma_last,
                "d_line_last": d_line_last,
                "forgetting_factor": float(self.lamb),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Run QR-RLS adaptation over (x[k], d[k]) using the MATLAB-style recursion.
Parameters
input_signal:
Input sequence x[k] (real), shape (N,).
desired_signal:
Desired sequence d[k] (real), shape (N,).
verbose:
If True, prints runtime.
return_internal_states:
If True, includes selected internal state in result.extra.
Returns
OptimizationResult outputs: Estimated output y[k] (real). errors: MATLAB-style error quantity e[k] = d_line * gamma (real). coefficients: History of coefficients stored in the base class. error_type: "a_posteriori".
Extra (when return_internal_states=True)
extra["ULineMatrix_last"]: Final ULineMatrix. extra["dLine_q2_last"]: Final dLine_q2. extra["gamma_last"]: gamma at the last iteration. extra["d_line_last"]: d_line at the last iteration.
class ErrorEquation(AdaptiveFilter):
    """
    Implements the Equation Error RLS algorithm for real-valued IIR adaptive filtering.
    """
    supports_complex: bool = False

    # Orders, RLS hyper-parameters and recursion state.
    zeros_order: int
    poles_order: int
    forgetting_factor: float
    epsilon: float
    n_coeffs: int
    Sd: np.ndarray
    y_buffer: np.ndarray
    d_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        zeros_order:
            Numerator order (number of zeros).
        poles_order:
            Denominator order (number of poles).
        forgetting_factor:
            Forgetting factor (lambda), typically close to 1.
        epsilon:
            Regularization / initialization parameter for the inverse correlation matrix.
        w_init:
            Optional initial coefficient vector of length poles_order + 1 + zeros_order.
            If None, initializes to zeros.

        Raises
        ------
        ValueError
            If `w_init` is provided with the wrong length.

        Notes
        -----
        Coefficient vector convention:
        - First `poles_order` entries correspond to denominator (pole) parameters.
        - Remaining entries correspond to numerator (zero) parameters.
        """
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.forgetting_factor = float(forgetting_factor)
        self.epsilon = float(epsilon)

        self.n_coeffs = int(self.poles_order + 1 + self.zeros_order)

        # Start from zeros, then honor an explicit w_init. Previously w_init
        # was accepted but always overwritten with zeros (silently discarded);
        # this mirrors the w_init handling used by the other filters.
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
                )
            self.w = w0.copy()

        # Regularized inverse of the deterministic correlation matrix.
        self.Sd = (1.0 / self.epsilon) * np.eye(self.n_coeffs, dtype=np.float64)

        # Delay lines of past outputs and past desired samples (newest first).
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
        self.d_buffer = np.zeros(self.poles_order, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        """
        # Denominator polynomial 1 - a1 z^-1 - ...; its roots are the poles.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0

        if np.any(mask):
            # Reflect unstable poles to 1/conj(p) and rebuild the coefficients
            # from the corrected pole set.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Equation Error RLS algorithm for IIR filters.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns pole coefficients trajectory in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] computed from the output equation.
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "equation_error".

        Extra (always)
        -------------
        extra["auxiliary_errors"]:
            Equation-error based auxiliary error sequence.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["a_coefficients"]:
            Trajectory of denominator (pole) coefficients, shape (N, poles_order).
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_aux: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        a_track: Optional[np.ndarray] = (
            np.zeros((n_samples, self.poles_order), dtype=np.float64)
            if (return_internal_states and self.poles_order > 0)
            else None
        )

        # Zero-prepadded input so early regressors see an all-zero past.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Input regressor [x[k], ..., x[k-zeros_order]] (newest first).
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            # Output equation uses past outputs; the equation-error regressor
            # substitutes past desired samples for them.
            reg_y: np.ndarray = np.concatenate((self.y_buffer, reg_x))
            reg_e: np.ndarray = np.concatenate((self.d_buffer, reg_x))

            y_out: float = float(np.dot(self.w, reg_y))
            y_equation: float = float(np.dot(self.w, reg_e))

            outputs[k] = y_out
            errors[k] = float(d[k] - y_out)
            errors_aux[k] = float(d[k] - y_equation)

            # RLS update of Sd via the matrix inversion lemma, then the
            # coefficient update driven by the equation error.
            psi: np.ndarray = self.Sd @ reg_e
            den: float = float(self.forgetting_factor + reg_e.T @ psi)

            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
            self.w += (self.Sd @ reg_e) * errors_aux[k]

            # Keep the estimated poles inside the unit circle.
            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])

            if return_internal_states and a_track is not None:
                a_track[k, :] = self.w[: self.poles_order]

            # Shift the delay lines (newest sample first).
            self.y_buffer = np.concatenate(([y_out], self.y_buffer[:-1]))
            self.d_buffer = np.concatenate(([d[k]], self.d_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[ErrorEquation] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "auxiliary_errors": errors_aux,
        }
        if return_internal_states:
            extra["a_coefficients"] = a_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="equation_error",
            extra=extra,
        )
Implements the Equation Error RLS algorithm for real-valued IIR adaptive filtering.
41 def __init__( 42 self, 43 zeros_order: int, 44 poles_order: int, 45 forgetting_factor: float = 0.99, 46 epsilon: float = 1e-3, 47 w_init: Optional[Union[np.ndarray, list]] = None, 48 ) -> None: 49 """ 50 Parameters 51 ---------- 52 zeros_order: 53 Numerator order (number of zeros). 54 poles_order: 55 Denominator order (number of poles). 56 forgetting_factor: 57 Forgetting factor (lambda), typically close to 1. 58 epsilon: 59 Regularization / initialization parameter for the inverse correlation matrix. 60 w_init: 61 Optional initial coefficient vector. If None, initializes to zeros. 62 63 Notes 64 ----- 65 Coefficient vector convention: 66 - First `poles_order` entries correspond to denominator (pole) parameters. 67 - Remaining entries correspond to numerator (zero) parameters. 68 """ 69 super().__init__(filter_order=zeros_order + poles_order, w_init=w_init) 70 71 self.zeros_order = int(zeros_order) 72 self.poles_order = int(poles_order) 73 self.forgetting_factor = float(forgetting_factor) 74 self.epsilon = float(epsilon) 75 76 self.n_coeffs = int(self.poles_order + 1 + self.zeros_order) 77 self.w = np.zeros(self.n_coeffs, dtype=np.float64) 78 79 self.Sd = (1.0 / self.epsilon) * np.eye(self.n_coeffs, dtype=np.float64) 80 81 self.y_buffer = np.zeros(self.poles_order, dtype=np.float64) 82 self.d_buffer = np.zeros(self.poles_order, dtype=np.float64)
Parameters
zeros_order: Numerator order (number of zeros). poles_order: Denominator order (number of poles). forgetting_factor: Forgetting factor (lambda), typically close to 1. epsilon: Regularization / initialization parameter for the inverse correlation matrix. w_init: Optional initial coefficient vector. If None, initializes to zeros.
Notes
Coefficient vector convention:
- First `poles_order` entries correspond to denominator (pole) parameters.
- Remaining entries correspond to numerator (zero) parameters.
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Equation Error RLS algorithm for IIR filters.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns pole coefficients trajectory in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] computed from the output equation.
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "equation_error".

        Extra (always)
        -------------
        extra["auxiliary_errors"]:
            Equation-error based auxiliary error sequence.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["a_coefficients"]:
            Trajectory of denominator (pole) coefficients, shape (N, poles_order).
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_aux: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        a_track: Optional[np.ndarray] = (
            np.zeros((n_samples, self.poles_order), dtype=np.float64)
            if (return_internal_states and self.poles_order > 0)
            else None
        )

        # Zero-prepadded input so early regressors see an all-zero past.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Input regressor [x[k], ..., x[k-zeros_order]] (newest first).
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            # Output equation uses past outputs; the equation-error regressor
            # substitutes past desired samples for them.
            reg_y: np.ndarray = np.concatenate((self.y_buffer, reg_x))
            reg_e: np.ndarray = np.concatenate((self.d_buffer, reg_x))

            y_out: float = float(np.dot(self.w, reg_y))
            y_equation: float = float(np.dot(self.w, reg_e))

            outputs[k] = y_out
            errors[k] = float(d[k] - y_out)
            errors_aux[k] = float(d[k] - y_equation)

            # RLS update of Sd via the matrix inversion lemma, then the
            # coefficient update driven by the equation error.
            psi: np.ndarray = self.Sd @ reg_e
            den: float = float(self.forgetting_factor + reg_e.T @ psi)

            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
            self.w += (self.Sd @ reg_e) * errors_aux[k]

            # Keep the estimated poles inside the unit circle.
            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])

            if return_internal_states and a_track is not None:
                a_track[k, :] = self.w[: self.poles_order]

            # Shift the delay lines (newest sample first).
            self.y_buffer = np.concatenate(([y_out], self.y_buffer[:-1]))
            self.d_buffer = np.concatenate(([d[k]], self.d_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[ErrorEquation] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "auxiliary_errors": errors_aux,
        }
        if return_internal_states:
            extra["a_coefficients"] = a_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="equation_error",
            extra=extra,
        )
Executes the Equation Error RLS algorithm for IIR filters.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns pole coefficients trajectory in result.extra.
Returns
OptimizationResult outputs: Filter output y[k] computed from the output equation. errors: Output error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "equation_error".
Extra (always)
extra["auxiliary_errors"]: Equation-error based auxiliary error sequence.
Extra (when return_internal_states=True)
extra["a_coefficients"]: Trajectory of denominator (pole) coefficients, shape (N, poles_order).
class GaussNewton(AdaptiveFilter):
    """
    Implements the Gauss-Newton algorithm for real-valued IIR adaptive filters.

    Notes
    -----
    Coefficient vector convention:
    - First `poles_order` entries correspond to denominator (pole) parameters.
    - Remaining entries correspond to numerator (zero) parameters.
    """
    supports_complex: bool = False

    zeros_order: int
    poles_order: int
    alpha: float
    step_size: float
    delta: float
    n_coeffs: int
    Sd: np.ndarray
    y_buffer: np.ndarray
    x_line_buffer: np.ndarray
    y_line_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        alpha: float = 0.05,
        step_size: float = 1.0,
        delta: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        zeros_order:
            Numerator order (number of zeros).
        poles_order:
            Denominator order (number of poles).
        alpha:
            Exponential weighting factor used in the recursion (0 < alpha < 1).
        step_size:
            Step size applied to the Gauss-Newton update.
        delta:
            Regularization parameter used to initialize Sd.
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.

        Raises
        ------
        ValueError
            If `w_init` is provided but its length differs from
            `zeros_order + 1 + poles_order`.
        """
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.alpha = float(alpha)
        self.step_size = float(step_size)
        self.delta = float(delta)

        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
        # BUG FIX: self.w was previously reset to zeros unconditionally,
        # silently discarding a user-supplied w_init. Honor the documented
        # contract: zeros only when w_init is None.
        if w_init is None:
            self.w = np.zeros(self.n_coeffs, dtype=np.float64)
        else:
            self.w = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if self.w.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have {self.n_coeffs} coefficients, "
                    f"got {self.w.size}"
                )

        # Inverse-correlation-style matrix driving the Gauss-Newton recursion,
        # regularized at start-up by 1/delta on the diagonal.
        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)

        # Past outputs y[k-1], ..., y[k-poles_order] feeding the recursive part.
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Sensitivity (derivative) signal buffers; sized to hold whichever
        # section (numerator taps or denominator taps) is longer.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        """
        # Denominator polynomial is 1 - a_1 z^-1 - ... - a_P z^-P, hence the
        # sign flip when building/unpacking the coefficient array.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflect |p| > 1 to 1/conj(p), preserving the pole's phase.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Gauss-Newton adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns sensitivity signals in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "output_error".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["x_sensitivity"]:
            Sensitivity-related track (x_line), length N.
        extra["y_sensitivity"]:
            Sensitivity-related track (y_line), length N.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Zero-pad the input so the first regressors see zeros for x[k<0].
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        inv_alpha: float = float(1.0 - self.alpha)
        alpha_ratio: float = float(inv_alpha / self.alpha)

        for k in range(n_samples):
            # Regressor: [y[k-1..k-P], x[k], x[k-1], ..., x[k-M]].
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k

            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            a_coeffs: np.ndarray = self.w[: self.poles_order]

            # Sensitivity recursions (filtered versions of x and y through
            # the current denominator).
            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

            y_line_k: float = 0.0
            if self.poles_order > 0:
                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

            if return_internal_states and x_line_track is not None and y_line_track is not None:
                x_line_track[k] = x_line_k
                y_line_track[k] = y_line_k

            # Gradient-direction vector built from the sensitivity signals.
            phi: np.ndarray = np.concatenate(
                (
                    self.y_line_buffer[: self.poles_order],
                    -self.x_line_buffer[: self.zeros_order + 1],
                )
            )

            # Rank-one (Sherman-Morrison style) update of Sd.
            psi: np.ndarray = self.Sd @ phi
            den: float = float(alpha_ratio + phi.T @ psi)

            self.Sd = (1.0 / inv_alpha) * (self.Sd - np.outer(psi, psi) / den)
            self.w = self.w - self.step_size * (self.Sd @ phi) * e_k

            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[GaussNewton] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "x_sensitivity": x_line_track,
                "y_sensitivity": y_line_track,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Implements the Gauss-Newton algorithm for real-valued IIR adaptive filters.
Notes
Coefficient vector convention:
- First `poles_order` entries correspond to denominator (pole) parameters.
- Remaining entries correspond to numerator (zero) parameters.
49 def __init__( 50 self, 51 zeros_order: int, 52 poles_order: int, 53 alpha: float = 0.05, 54 step_size: float = 1.0, 55 delta: float = 1e-3, 56 w_init: Optional[Union[np.ndarray, list]] = None, 57 ) -> None: 58 """ 59 Parameters 60 ---------- 61 zeros_order: 62 Numerator order (number of zeros). 63 poles_order: 64 Denominator order (number of poles). 65 alpha: 66 Exponential weighting factor used in the recursion (0 < alpha < 1). 67 step_size: 68 Step size applied to the Gauss-Newton update. 69 delta: 70 Regularization parameter used to initialize Sd. 71 w_init: 72 Optional initial coefficient vector. If None, initializes to zeros. 73 """ 74 super().__init__(filter_order=zeros_order + poles_order, w_init=w_init) 75 76 self.zeros_order = int(zeros_order) 77 self.poles_order = int(poles_order) 78 self.alpha = float(alpha) 79 self.step_size = float(step_size) 80 self.delta = float(delta) 81 82 self.n_coeffs = int(self.zeros_order + 1 + self.poles_order) 83 self.w = np.zeros(self.n_coeffs, dtype=np.float64) 84 85 self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64) 86 87 self.y_buffer = np.zeros(self.poles_order, dtype=np.float64) 88 89 max_buffer: int = int(max(self.zeros_order + 1, self.poles_order)) 90 self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64) 91 self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
Parameters
zeros_order: Numerator order (number of zeros). poles_order: Denominator order (number of poles). alpha: Exponential weighting factor used in the recursion (0 < alpha < 1). step_size: Step size applied to the Gauss-Newton update. delta: Regularization parameter used to initialize Sd. w_init: Optional initial coefficient vector. If None, initializes to zeros.
106 @ensure_real_signals 107 @validate_input 108 def optimize( 109 self, 110 input_signal: np.ndarray, 111 desired_signal: np.ndarray, 112 verbose: bool = False, 113 return_internal_states: bool = False, 114 ) -> OptimizationResult: 115 """ 116 Executes the Gauss-Newton adaptation. 117 118 Parameters 119 ---------- 120 input_signal: 121 Input signal x[k]. 122 desired_signal: 123 Desired signal d[k]. 124 verbose: 125 If True, prints runtime. 126 return_internal_states: 127 If True, returns sensitivity signals in result.extra. 128 129 Returns 130 ------- 131 OptimizationResult 132 outputs: 133 Filter output y[k]. 134 errors: 135 Output error e[k] = d[k] - y[k]. 136 coefficients: 137 History of coefficients stored in the base class. 138 error_type: 139 "output_error". 140 141 Extra (when return_internal_states=True) 142 -------------------------------------- 143 extra["x_sensitivity"]: 144 Sensitivity-related track (x_line), length N. 145 extra["y_sensitivity"]: 146 Sensitivity-related track (y_line), length N. 
147 """ 148 tic: float = time() 149 150 x: np.ndarray = np.asarray(input_signal, dtype=np.float64) 151 d: np.ndarray = np.asarray(desired_signal, dtype=np.float64) 152 153 n_samples: int = int(x.size) 154 155 outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64) 156 errors: np.ndarray = np.zeros(n_samples, dtype=np.float64) 157 158 x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None 159 y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None 160 161 x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64) 162 x_padded[self.zeros_order:] = x 163 164 inv_alpha: float = float(1.0 - self.alpha) 165 alpha_ratio: float = float(inv_alpha / self.alpha) 166 167 for k in range(n_samples): 168 reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1] 169 regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x)) 170 171 y_k: float = float(np.dot(self.w, regressor)) 172 outputs[k] = y_k 173 174 e_k: float = float(d[k] - y_k) 175 errors[k] = e_k 176 177 a_coeffs: np.ndarray = self.w[: self.poles_order] 178 179 x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order])) 180 181 y_line_k: float = 0.0 182 if self.poles_order > 0: 183 prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0 184 y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order])) 185 186 self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1])) 187 self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1])) 188 189 if return_internal_states and x_line_track is not None and y_line_track is not None: 190 x_line_track[k] = x_line_k 191 y_line_track[k] = y_line_k 192 193 phi: np.ndarray = np.concatenate( 194 ( 195 self.y_line_buffer[: self.poles_order], 196 -self.x_line_buffer[: self.zeros_order + 1], 197 ) 198 ) 199 200 psi: np.ndarray = self.Sd @ phi 201 den: float = 
float(alpha_ratio + phi.T @ psi) 202 203 self.Sd = (1.0 / inv_alpha) * (self.Sd - np.outer(psi, psi) / den) 204 self.w = self.w - self.step_size * (self.Sd @ phi) * e_k 205 206 if self.poles_order > 0: 207 self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order]) 208 self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1])) 209 210 self._record_history() 211 212 runtime_s: float = float(time() - tic) 213 if verbose: 214 print(f"[GaussNewton] Completed in {runtime_s * 1000:.02f} ms") 215 216 extra: Optional[Dict[str, Any]] = None 217 if return_internal_states: 218 extra = { 219 "x_sensitivity": x_line_track, 220 "y_sensitivity": y_line_track, 221 } 222 223 return self._pack_results( 224 outputs=outputs, 225 errors=errors, 226 runtime_s=runtime_s, 227 error_type="output_error", 228 extra=extra, 229 )
Executes the Gauss-Newton adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns sensitivity signals in result.extra.
Returns
OptimizationResult outputs: Filter output y[k]. errors: Output error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "output_error".
Extra (when return_internal_states=True)
extra["x_sensitivity"]: Sensitivity-related track (x_line), length N. extra["y_sensitivity"]: Sensitivity-related track (y_line), length N.
class GaussNewtonGradient(AdaptiveFilter):
    """
    Implements the gradient-based Gauss-Newton algorithm for real-valued IIR adaptive filters.
    """
    supports_complex: bool = False

    zeros_order: int
    poles_order: int
    step_size: float
    n_coeffs: int
    y_buffer: np.ndarray
    x_line_buffer: np.ndarray
    y_line_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        step_size: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        zeros_order:
            Numerator order (number of zeros).
        poles_order:
            Denominator order (number of poles).
        step_size:
            Gradient step size.
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.

        Raises
        ------
        ValueError
            If `w_init` is provided but its length differs from
            `zeros_order + poles_order + 1`.

        Notes
        -----
        Coefficient vector convention:
        - First `poles_order` entries correspond to denominator (pole) parameters.
        - Remaining entries correspond to numerator (zero) parameters.
        """
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.step_size = float(step_size)

        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
        # BUG FIX: self.w was previously reset to zeros unconditionally,
        # silently discarding a user-supplied w_init. Honor the documented
        # contract: zeros only when w_init is None.
        if w_init is None:
            self.w = np.zeros(self.n_coeffs, dtype=np.float64)
        else:
            self.w = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if self.w.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have {self.n_coeffs} coefficients, "
                    f"got {self.w.size}"
                )

        # Past outputs y[k-1], ..., y[k-poles_order] feeding the recursive part.
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Sensitivity (derivative) signal buffers; sized to hold whichever
        # section (numerator taps or denominator taps) is longer.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        """
        # Denominator polynomial is 1 - a_1 z^-1 - ... - a_P z^-P, hence the
        # sign flip when building/unpacking the coefficient array.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflect |p| > 1 to 1/conj(p), preserving the pole's phase.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the gradient-based Gauss-Newton adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns sensitivity signals in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "output_error".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["x_sensitivity"]:
            Sensitivity-related track (x_line), length N.
        extra["y_sensitivity"]:
            Sensitivity-related track (y_line), length N.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Zero-pad the input so the first regressors see zeros for x[k<0].
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Regressor: [y[k-1..k-P], x[k], x[k-1], ..., x[k-M]].
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k

            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            a_coeffs: np.ndarray = self.w[: self.poles_order]

            # Sensitivity recursions (filtered versions of x and y through
            # the current denominator).
            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

            y_line_k: float = 0.0
            if self.poles_order > 0:
                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

            if return_internal_states and x_line_track is not None and y_line_track is not None:
                x_line_track[k] = x_line_k
                y_line_track[k] = y_line_k

            # Gradient-direction vector built from the sensitivity signals.
            phi: np.ndarray = np.concatenate(
                (
                    self.y_line_buffer[: self.poles_order],
                    -self.x_line_buffer[: self.zeros_order + 1],
                )
            )

            # Plain gradient step (no Sd matrix, unlike GaussNewton).
            self.w -= self.step_size * phi * e_k

            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])

            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[GaussNewtonGradient] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "x_sensitivity": x_line_track,
                "y_sensitivity": y_line_track,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Implements the gradient-based Gauss-Newton algorithm for real-valued IIR adaptive filters.
40 def __init__( 41 self, 42 zeros_order: int, 43 poles_order: int, 44 step_size: float = 1e-3, 45 w_init: Optional[Union[np.ndarray, list]] = None, 46 ) -> None: 47 """ 48 Parameters 49 ---------- 50 zeros_order: 51 Numerator order (number of zeros). 52 poles_order: 53 Denominator order (number of poles). 54 step_size: 55 Gradient step size. 56 w_init: 57 Optional initial coefficient vector. If None, initializes to zeros. 58 59 Notes 60 ----- 61 Coefficient vector convention: 62 - First `poles_order` entries correspond to denominator (pole) parameters. 63 - Remaining entries correspond to numerator (zero) parameters. 64 """ 65 super().__init__(filter_order=zeros_order + poles_order, w_init=w_init) 66 67 self.zeros_order = int(zeros_order) 68 self.poles_order = int(poles_order) 69 self.step_size = float(step_size) 70 71 self.n_coeffs = int(self.zeros_order + self.poles_order + 1) 72 self.w = np.zeros(self.n_coeffs, dtype=np.float64) 73 74 self.y_buffer = np.zeros(self.poles_order, dtype=np.float64) 75 76 max_buffer: int = int(max(self.zeros_order + 1, self.poles_order)) 77 self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64) 78 self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
Parameters
zeros_order: Numerator order (number of zeros). poles_order: Denominator order (number of poles). step_size: Gradient step size. w_init: Optional initial coefficient vector. If None, initializes to zeros.
Notes
Coefficient vector convention:
- First `poles_order` entries correspond to denominator (pole) parameters.
- Remaining entries correspond to numerator (zero) parameters.
93 @ensure_real_signals 94 @validate_input 95 def optimize( 96 self, 97 input_signal: np.ndarray, 98 desired_signal: np.ndarray, 99 verbose: bool = False, 100 return_internal_states: bool = False, 101 ) -> OptimizationResult: 102 """ 103 Executes the gradient-based Gauss-Newton adaptation. 104 105 Parameters 106 ---------- 107 input_signal: 108 Input signal x[k]. 109 desired_signal: 110 Desired signal d[k]. 111 verbose: 112 If True, prints runtime. 113 return_internal_states: 114 If True, returns sensitivity signals in result.extra. 115 116 Returns 117 ------- 118 OptimizationResult 119 outputs: 120 Filter output y[k]. 121 errors: 122 Output error e[k] = d[k] - y[k]. 123 coefficients: 124 History of coefficients stored in the base class. 125 error_type: 126 "output_error". 127 128 Extra (when return_internal_states=True) 129 -------------------------------------- 130 extra["x_sensitivity"]: 131 Sensitivity-related track (x_line), length N. 132 extra["y_sensitivity"]: 133 Sensitivity-related track (y_line), length N. 
134 """ 135 tic: float = time() 136 137 x: np.ndarray = np.asarray(input_signal, dtype=np.float64) 138 d: np.ndarray = np.asarray(desired_signal, dtype=np.float64) 139 140 n_samples: int = int(x.size) 141 142 outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64) 143 errors: np.ndarray = np.zeros(n_samples, dtype=np.float64) 144 145 x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None 146 y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None 147 148 x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64) 149 x_padded[self.zeros_order:] = x 150 151 for k in range(n_samples): 152 reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1] 153 regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x)) 154 155 y_k: float = float(np.dot(self.w, regressor)) 156 outputs[k] = y_k 157 158 e_k: float = float(d[k] - y_k) 159 errors[k] = e_k 160 161 a_coeffs: np.ndarray = self.w[: self.poles_order] 162 163 x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order])) 164 165 y_line_k: float = 0.0 166 if self.poles_order > 0: 167 prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0 168 y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order])) 169 170 self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1])) 171 self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1])) 172 173 if return_internal_states and x_line_track is not None and y_line_track is not None: 174 x_line_track[k] = x_line_k 175 y_line_track[k] = y_line_k 176 177 phi: np.ndarray = np.concatenate( 178 ( 179 self.y_line_buffer[: self.poles_order], 180 -self.x_line_buffer[: self.zeros_order + 1], 181 ) 182 ) 183 184 self.w -= self.step_size * phi * e_k 185 186 if self.poles_order > 0: 187 self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order]) 188 
self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1])) 189 190 self._record_history() 191 192 runtime_s: float = float(time() - tic) 193 if verbose: 194 print(f"[GaussNewtonGradient] Completed in {runtime_s * 1000:.02f} ms") 195 196 extra: Optional[Dict[str, Any]] = None 197 if return_internal_states: 198 extra = { 199 "x_sensitivity": x_line_track, 200 "y_sensitivity": y_line_track, 201 } 202 203 return self._pack_results( 204 outputs=outputs, 205 errors=errors, 206 runtime_s=runtime_s, 207 error_type="output_error", 208 extra=extra, 209 )
Executes the gradient-based Gauss-Newton adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns sensitivity signals in result.extra.
Returns
OptimizationResult outputs: Filter output y[k]. errors: Output error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "output_error".
Extra (when return_internal_states=True)
extra["x_sensitivity"]: Sensitivity-related track (x_line), length N. extra["y_sensitivity"]: Sensitivity-related track (y_line), length N.
class RLSIIR(AdaptiveFilter):
    """
    Implements the RLS version of the Output Error algorithm for real-valued IIR adaptive filters.

    Notes
    -----
    Coefficient vector convention:
    - First `poles_order` entries correspond to denominator (pole) parameters.
    - Remaining entries correspond to numerator (zero) parameters.
    """
    supports_complex: bool = False

    zeros_order: int
    poles_order: int
    forgetting_factor: float
    delta: float
    n_coeffs: int
    Sd: np.ndarray
    y_buffer: np.ndarray
    x_line_buffer: np.ndarray
    y_line_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        forgetting_factor: float = 0.99,
        delta: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        zeros_order:
            Numerator order (number of zeros).
        poles_order:
            Denominator order (number of poles).
        forgetting_factor:
            Forgetting factor (lambda), typically close to 1.
        delta:
            Regularization parameter used to initialize Sd (inverse covariance).
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.

        Raises
        ------
        ValueError
            If `w_init` is provided but its length differs from
            `zeros_order + poles_order + 1`.
        """
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.forgetting_factor = float(forgetting_factor)
        self.delta = float(delta)

        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
        # BUG FIX: self.w was previously reset to zeros unconditionally,
        # silently discarding a user-supplied w_init. Honor the documented
        # contract: zeros only when w_init is None.
        if w_init is None:
            self.w = np.zeros(self.n_coeffs, dtype=np.float64)
        else:
            self.w = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if self.w.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have {self.n_coeffs} coefficients, "
                    f"got {self.w.size}"
                )

        # Inverse covariance estimate, regularized at start-up by 1/delta.
        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)

        # Past outputs y[k-1], ..., y[k-poles_order] feeding the recursive part.
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Sensitivity (derivative) signal buffers; sized to hold whichever
        # section (numerator taps or denominator taps) is longer.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        """
        # Denominator polynomial is 1 - a_1 z^-1 - ... - a_P z^-P, hence the
        # sign flip when building/unpacking the coefficient array.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflect |p| > 1 to 1/conj(p), preserving the pole's phase.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the RLS-IIR (Output Error) adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns sensitivity signals in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                Output error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "output_error".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["x_sensitivity"]:
            Sensitivity-related track (x_line), length N.
        extra["y_sensitivity"]:
            Sensitivity-related track (y_line), length N.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Zero-pad the input so the first regressors see zeros for x[k<0].
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Regressor: [y[k-1..k-P], x[k], x[k-1], ..., x[k-M]].
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k

            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            a_coeffs: np.ndarray = self.w[: self.poles_order]

            # Sensitivity recursions (filtered versions of x and y through
            # the current denominator).
            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

            y_line_k: float = 0.0
            if self.poles_order > 0:
                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

            if return_internal_states and x_line_track is not None and y_line_track is not None:
                x_line_track[k] = x_line_k
                y_line_track[k] = y_line_k

            # Gradient-direction vector built from the sensitivity signals.
            phi: np.ndarray = np.concatenate(
                (
                    self.y_line_buffer[: self.poles_order],
                    -self.x_line_buffer[: self.zeros_order + 1],
                )
            )

            # Rank-one (Sherman-Morrison style) update of the inverse covariance.
            psi: np.ndarray = self.Sd @ phi
            den: float = float(self.forgetting_factor + phi.T @ psi)
            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)

            self.w -= (self.Sd @ phi) * e_k

            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[RLSIIR] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "x_sensitivity": x_line_track,
                "y_sensitivity": y_line_track,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Implements the RLS version of the Output Error algorithm for real-valued IIR adaptive filters.
Notes
Coefficient vector convention:
- First `poles_order` entries correspond to denominator (pole) parameters.
- Remaining entries correspond to numerator (zero) parameters.
49 def __init__( 50 self, 51 zeros_order: int, 52 poles_order: int, 53 forgetting_factor: float = 0.99, 54 delta: float = 1e-3, 55 w_init: Optional[Union[np.ndarray, list]] = None, 56 ) -> None: 57 """ 58 Parameters 59 ---------- 60 zeros_order: 61 Numerator order (number of zeros). 62 poles_order: 63 Denominator order (number of poles). 64 forgetting_factor: 65 Forgetting factor (lambda), typically close to 1. 66 delta: 67 Regularization parameter used to initialize Sd (inverse covariance). 68 w_init: 69 Optional initial coefficient vector. If None, initializes to zeros. 70 """ 71 super().__init__(filter_order=zeros_order + poles_order, w_init=w_init) 72 73 self.zeros_order = int(zeros_order) 74 self.poles_order = int(poles_order) 75 self.forgetting_factor = float(forgetting_factor) 76 self.delta = float(delta) 77 78 self.n_coeffs = int(self.zeros_order + self.poles_order + 1) 79 self.w = np.zeros(self.n_coeffs, dtype=np.float64) 80 81 self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64) 82 83 self.y_buffer = np.zeros(self.poles_order, dtype=np.float64) 84 85 max_buffer: int = int(max(self.zeros_order + 1, self.poles_order)) 86 self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64) 87 self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
Parameters
zeros_order: Numerator order (number of zeros). poles_order: Denominator order (number of poles). forgetting_factor: Forgetting factor (lambda), typically close to 1. delta: Regularization parameter used to initialize Sd (inverse covariance). w_init: Optional initial coefficient vector. If None, initializes to zeros.
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the RLS-IIR (Output Error) adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns sensitivity signals in result.extra.

    Returns
    -------
    OptimizationResult
        outputs:
            Filter output y[k].
        errors:
            Output error e[k] = d[k] - y[k].
        coefficients:
            History of coefficients stored in the base class.
        error_type:
            "output_error".

    Extra (when return_internal_states=True)
    --------------------------------------
    extra["x_sensitivity"]:
        Sensitivity-related track (x_line), length N.
    extra["y_sensitivity"]:
        Sensitivity-related track (y_line), length N.
    """
    tic: float = time()

    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

    n_samples: int = int(x.size)

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Sensitivity tracks are only materialized when the caller asks for them.
    x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
    y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

    # Zero-pad the input so the first tapped-delay regressors see zeros
    # for samples before k = 0.
    x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
    x_padded[self.zeros_order:] = x

    for k in range(n_samples):
        # Most-recent-first input taps: [x[k], x[k-1], ..., x[k-zeros_order]].
        reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
        # Regressor = [past outputs (AR part) | input taps (MA part)],
        # matching the coefficient layout in self.w.
        regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

        y_k: float = float(np.dot(self.w, regressor))
        outputs[k] = y_k

        e_k: float = float(d[k] - y_k)
        errors[k] = e_k

        # Current denominator (pole) estimate.
        a_coeffs: np.ndarray = self.w[: self.poles_order]

        # Sensitivity recursions: x and -y filtered through the current
        # all-pole section 1/A(z) (output-error gradient filtering).
        x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

        y_line_k: float = 0.0
        if self.poles_order > 0:
            prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
            y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

        # Shift newest sensitivity sample into the front of each buffer.
        self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
        self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

        if return_internal_states and x_line_track is not None and y_line_track is not None:
            x_line_track[k] = x_line_k
            y_line_track[k] = y_line_k

        # Gradient-direction vector; note the negated input-sensitivity part.
        # NOTE(review): the minus here pairs with the minus in the w update
        # below — confirm the sign convention against Diniz's RLS-IIR listing.
        phi: np.ndarray = np.concatenate(
            (
                self.y_line_buffer[: self.poles_order],
                -self.x_line_buffer[: self.zeros_order + 1],
            )
        )

        # Standard RLS inverse-correlation (Sd) update with forgetting factor.
        psi: np.ndarray = self.Sd @ phi
        den: float = float(self.forgetting_factor + phi.T @ psi)
        self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)

        # Coefficient update uses the *updated* Sd (a posteriori gain).
        self.w -= (self.Sd @ phi) * e_k

        if self.poles_order > 0:
            # Project poles back inside the unit circle after each update.
            self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

        # One coefficient snapshot per processed sample.
        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[RLSIIR] Completed in {runtime_s * 1000:.02f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "x_sensitivity": x_line_track,
            "y_sensitivity": y_line_track,
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Executes the RLS-IIR (Output Error) adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns sensitivity signals in result.extra.
Returns
OptimizationResult outputs: Filter output y[k]. errors: Output error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class. error_type: "output_error".
Extra (when return_internal_states=True)
extra["x_sensitivity"]: Sensitivity-related track (x_line), length N. extra["y_sensitivity"]: Sensitivity-related track (y_line), length N.
class SteiglitzMcBride(AdaptiveFilter):
    """
    Implements the Steiglitz-McBride (SM) algorithm for real-valued IIR adaptive filters.

    Notes
    -----
    This implementation follows the *classic* Steiglitz-McBride idea:
    - Build a prefiltered (approximately linear) regression using the current denominator estimate.
    - Update coefficients using the *filtered equation error* (auxiliary error).

    Coefficient vector convention:
    - First `poles_order` entries correspond to denominator (pole) parameters.
    - Remaining entries correspond to numerator (zero) parameters.
    """
    supports_complex: bool = False

    # Instance attribute declarations (set in __init__).
    zeros_order: int        # numerator order
    poles_order: int        # denominator order
    step_size: float        # gradient step size
    n_coeffs: int           # total coefficients: poles_order + zeros_order + 1
    y_buffer: np.ndarray    # past outputs (AR part of the regressor)
    xf_buffer: np.ndarray   # input filtered by current all-pole section
    df_buffer: np.ndarray   # desired signal filtered by current all-pole section

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        step_size: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        zeros_order:
            Numerator order (number of zeros).
        poles_order:
            Denominator order (number of poles).
        step_size:
            Step size used in the coefficient update.
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.step_size = float(step_size)

        # poles + zeros + direct b0 tap
        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)

        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # poles_order + 1 because the SM regressor reads df_buffer[1:poles_order+1]
        # (delayed filtered-desired samples) below.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order + 1))
        self.xf_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.df_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        """
        # Denominator polynomial in the convention A(z) = 1 - a1 z^-1 - a2 z^-2 - ...
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflect each unstable pole to its conjugate-reciprocal inside
            # the unit circle, then rebuild the coefficient vector.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Steiglitz-McBride adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k].
        desired_signal:
            Desired signal d[k].
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns the auxiliary (prefiltered) equation error in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Output computed from the current IIR model.
            errors:
                Output error e[k] = d[k] - y[k] (for evaluation/monitoring).
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "a_posteriori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["auxiliary_error"]:
            Filtered equation error sequence e_s[k] used in the SM update.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_s: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        # Zero-pad so the first tapped-delay regressors see zeros before k = 0.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Most-recent-first input taps.
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            # Regressor = [past outputs | input taps], matching self.w layout.
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k
            errors[k] = float(d[k] - y_k)

            a_coeffs: np.ndarray = self.w[: self.poles_order]

            # Prefilter x and d through the current all-pole section 1/A(z):
            # this is the SM "linearizing" step.
            xf_k: float = float(x[k] + np.dot(a_coeffs, self.xf_buffer[: self.poles_order]))
            df_k: float = float(d[k] + np.dot(a_coeffs, self.df_buffer[: self.poles_order]))

            self.xf_buffer = np.concatenate(([xf_k], self.xf_buffer[:-1]))
            self.df_buffer = np.concatenate(([df_k], self.df_buffer[:-1]))

            # Prefiltered regressor: delayed filtered-desired samples for the
            # pole part, filtered-input taps for the zero part.
            if self.poles_order == 0:
                regressor_s: np.ndarray = self.xf_buffer[: self.zeros_order + 1]
            else:
                regressor_s = np.concatenate(
                    (
                        self.df_buffer[1 : self.poles_order + 1],
                        self.xf_buffer[: self.zeros_order + 1],
                    )
                )

            # Filtered equation (auxiliary) error driving the SM update.
            e_s_k: float = float(df_k - np.dot(self.w, regressor_s))
            errors_s[k] = e_s_k

            # LMS-style step on the prefiltered regression.
            self.w += 2.0 * self.step_size * regressor_s * e_s_k

            if self.poles_order > 0:
                # Keep the denominator stable after each update.
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            # One coefficient snapshot per processed sample.
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[SteiglitzMcBride] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {"auxiliary_error": errors_s}

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Implements the Steiglitz-McBride (SM) algorithm for real-valued IIR adaptive filters.
Notes
This implementation follows the classic Steiglitz-McBride idea:
- Build a prefiltered (approximately linear) regression using the current denominator estimate.
- Update coefficients using the filtered equation error (auxiliary error).
Coefficient vector convention:
- First `poles_order` entries correspond to denominator (pole) parameters.
- Remaining entries correspond to numerator (zero) parameters.
def __init__(
    self,
    zeros_order: int,
    poles_order: int,
    step_size: float = 1e-3,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """
    Parameters
    ----------
    zeros_order:
        Numerator order (number of zeros).
    poles_order:
        Denominator order (number of poles).
    step_size:
        Step size used in the coefficient update.
    w_init:
        Optional initial coefficient vector. If None, initializes to zeros.
    """
    super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

    self.zeros_order = int(zeros_order)
    self.poles_order = int(poles_order)
    self.step_size = float(step_size)

    # poles + zeros + direct b0 tap
    self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
    self.w = np.zeros(self.n_coeffs, dtype=np.float64)

    # Past outputs used as the autoregressive part of the regressor.
    self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

    # poles_order + 1 so the SM regressor can read df_buffer[1:poles_order+1].
    max_buffer: int = int(max(self.zeros_order + 1, self.poles_order + 1))
    self.xf_buffer = np.zeros(max_buffer, dtype=np.float64)
    self.df_buffer = np.zeros(max_buffer, dtype=np.float64)
Parameters
zeros_order: Numerator order (number of zeros). poles_order: Denominator order (number of poles). step_size: Step size used in the coefficient update. w_init: Optional initial coefficient vector. If None, initializes to zeros.
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the Steiglitz-McBride adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k].
    desired_signal:
        Desired signal d[k].
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns the auxiliary (prefiltered) equation error in result.extra.

    Returns
    -------
    OptimizationResult
        outputs:
            Output computed from the current IIR model.
        errors:
            Output error e[k] = d[k] - y[k] (for evaluation/monitoring).
        coefficients:
            History of coefficients stored in the base class.
        error_type:
            "a_posteriori".

    Extra (when return_internal_states=True)
    --------------------------------------
    extra["auxiliary_error"]:
        Filtered equation error sequence e_s[k] used in the SM update.
    """
    tic: float = time()

    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

    n_samples: int = int(x.size)

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors_s: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Zero-pad so the earliest regressors see zeros for pre-signal samples.
    x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
    x_padded[self.zeros_order:] = x

    for k in range(n_samples):
        # Most-recent-first input taps; regressor matches self.w layout.
        reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
        regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

        y_k: float = float(np.dot(self.w, regressor))
        outputs[k] = y_k
        errors[k] = float(d[k] - y_k)

        a_coeffs: np.ndarray = self.w[: self.poles_order]

        # Prefilter x and d through the current all-pole section 1/A(z).
        xf_k: float = float(x[k] + np.dot(a_coeffs, self.xf_buffer[: self.poles_order]))
        df_k: float = float(d[k] + np.dot(a_coeffs, self.df_buffer[: self.poles_order]))

        self.xf_buffer = np.concatenate(([xf_k], self.xf_buffer[:-1]))
        self.df_buffer = np.concatenate(([df_k], self.df_buffer[:-1]))

        # Prefiltered regressor: delayed filtered-desired (pole part) plus
        # filtered-input taps (zero part).
        if self.poles_order == 0:
            regressor_s: np.ndarray = self.xf_buffer[: self.zeros_order + 1]
        else:
            regressor_s = np.concatenate(
                (
                    self.df_buffer[1 : self.poles_order + 1],
                    self.xf_buffer[: self.zeros_order + 1],
                )
            )

        # Filtered equation (auxiliary) error used for the SM update.
        e_s_k: float = float(df_k - np.dot(self.w, regressor_s))
        errors_s[k] = e_s_k

        self.w += 2.0 * self.step_size * regressor_s * e_s_k

        if self.poles_order > 0:
            # Keep the denominator stable after each update.
            self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

        # One coefficient snapshot per processed sample.
        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[SteiglitzMcBride] Completed in {runtime_s * 1000:.02f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {"auxiliary_error": errors_s}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_posteriori",
        extra=extra,
    )
Executes the Steiglitz-McBride adaptation.
Parameters
input_signal: Input signal x[k]. desired_signal: Desired signal d[k]. verbose: If True, prints runtime. return_internal_states: If True, returns the auxiliary (prefiltered) equation error in result.extra.
Returns
OptimizationResult outputs: Output computed from the current IIR model. errors: Output error e[k] = d[k] - y[k] (for evaluation/monitoring). coefficients: History of coefficients stored in the base class. error_type: "a_posteriori".
Extra (when return_internal_states=True)
extra["auxiliary_error"]: Filtered equation error sequence e_s[k] used in the SM update.
class BilinearRLS(AdaptiveFilter):
    """
    Bilinear RLS (real-valued).

    Implements a bilinear regressor RLS structure (Algorithm 11.3 - Diniz).
    The regressor used here has 4 components:

        u[k] = [ x[k],
                 d[k-1],
                 x[k] d[k-1],
                 x[k-1] d[k-1] ]^T

    and the RLS update (a priori form) is:

        y[k] = w^T u[k]
        e[k] = d[k] - y[k]
        k[k] = P[k-1] u[k] / (lambda + u[k]^T P[k-1] u[k])
        P[k] = (P[k-1] - k[k] u[k]^T P[k-1]) / lambda
        w[k] = w[k-1] + k[k] e[k]

    Notes
    -----
    - Real-valued only: enforced by `ensure_real_signals`.
    - Uses the unified base API via `validate_input`.
    - Returns a priori error by default.
    """

    supports_complex: bool = False

    def __init__(
        self,
        forgetting_factor: float = 0.98,
        delta: float = 1.0,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        forgetting_factor:
            Forgetting factor lambda (0 < lambda <= 1).
        delta:
            Regularization factor for initial P = I/delta (delta > 0).
        w_init:
            Optional initial coefficients (length 4). If None, zeros.
        safe_eps:
            Small epsilon used to guard denominators.
        """
        # The bilinear regressor is fixed at 4 entries, so the coefficient
        # length does not depend on a user-supplied filter order.
        n_coeffs = 4
        super().__init__(filter_order=n_coeffs - 1, w_init=w_init)

        self.lambda_factor = float(forgetting_factor)
        if not (0.0 < self.lambda_factor <= 1.0):
            raise ValueError(
                f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got {self.lambda_factor}."
            )

        self.delta = float(delta)
        if self.delta <= 0.0:
            raise ValueError(f"delta must be > 0. Got delta={self.delta}.")

        self._safe_eps = float(safe_eps)

        # Inverse correlation matrix, initialized as a regularized identity.
        self.P = np.eye(n_coeffs, dtype=np.float64) / self.delta

    # FIX: decorator order made consistent with the other real-valued filters
    # in this package (ensure_real_signals outermost, then validate_input),
    # so the realness check runs before input validation, as elsewhere.
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Bilinear RLS adaptation.

        Parameters
        ----------
        input_signal:
            Input signal x[k] (real).
        desired_signal:
            Desired signal d[k] (real).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns the last regressor and last gain in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] (a priori).
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of coefficients stored in the base class (length 4).
            error_type:
                "a_priori".
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples = int(x.size)
        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # One-sample delays of input and desired signal (zero before k = 0).
        x_prev = 0.0
        d_prev = 0.0

        last_u: Optional[np.ndarray] = None
        last_k: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Bilinear regressor: linear terms plus input/desired cross terms.
            u = np.array(
                [x[k], d_prev, x[k] * d_prev, x_prev * d_prev],
                dtype=np.float64,
            )
            last_u = u

            y_k = float(np.dot(self.w, u))
            outputs[k] = y_k

            e_k = float(d[k] - y_k)
            errors[k] = e_k

            Pu = self.P @ u
            denom = float(self.lambda_factor + (u @ Pu))
            # Guard a vanishing denominator while preserving its sign.
            if abs(denom) < self._safe_eps:
                denom = float(np.sign(denom) * self._safe_eps) if denom != 0.0 else float(self._safe_eps)

            k_gain = Pu / denom
            last_k = k_gain

            # Riccati-style update of the inverse correlation matrix.
            self.P = (self.P - np.outer(k_gain, Pu)) / self.lambda_factor

            self.w = self.w + k_gain * e_k
            self._record_history()  # one coefficient snapshot per sample

            x_prev = float(x[k])
            d_prev = float(d[k])

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[BilinearRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "P_last": self.P.copy(),
                "last_regressor": None if last_u is None else last_u.copy(),
                "last_gain": None if last_k is None else last_k.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Bilinear RLS (real-valued).
Implements a bilinear regressor RLS structure (Algorithm 11.3 - Diniz). The regressor used here has 4 components:
u[k] = [ x[k],
d[k-1],
x[k] d[k-1],
x[k-1] d[k-1] ]^T
and the RLS update (a priori form) is:
y[k] = w^T u[k]
e[k] = d[k] - y[k]
k[k] = P[k-1] u[k] / (lambda + u[k]^T P[k-1] u[k])
P[k] = (P[k-1] - k[k] u[k]^T P[k-1]) / lambda
w[k] = w[k-1] + k[k] e[k]
Notes
- Real-valued only: enforced by `ensure_real_signals`.
- Uses the unified base API via `validate_input`.
- Returns a priori error by default.
def __init__(
    self,
    forgetting_factor: float = 0.98,
    delta: float = 1.0,
    w_init: Optional[ArrayLike] = None,
    *,
    safe_eps: float = 1e-12,
) -> None:
    """
    Parameters
    ----------
    forgetting_factor:
        Forgetting factor lambda (0 < lambda <= 1).
    delta:
        Regularization factor for initial P = I/delta (delta > 0).
    w_init:
        Optional initial coefficients (length 4). If None, zeros.
    safe_eps:
        Small epsilon used to guard denominators.
    """
    # The bilinear regressor is fixed at 4 entries.
    n_coeffs = 4
    super().__init__(filter_order=n_coeffs - 1, w_init=w_init)

    self.lambda_factor = float(forgetting_factor)
    if not (0.0 < self.lambda_factor <= 1.0):
        raise ValueError(
            f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got {self.lambda_factor}."
        )

    self.delta = float(delta)
    if self.delta <= 0.0:
        raise ValueError(f"delta must be > 0. Got delta={self.delta}.")

    self._safe_eps = float(safe_eps)

    # Inverse correlation matrix, initialized as a regularized identity.
    self.P = np.eye(n_coeffs, dtype=np.float64) / self.delta
Parameters
forgetting_factor: Forgetting factor lambda (0 < lambda <= 1). delta: Regularization factor for initial P = I/delta (delta > 0). w_init: Optional initial coefficients (length 4). If None, zeros. safe_eps: Small epsilon used to guard denominators.
# NOTE(review): decorator order here is the reverse of the other real-valued
# filters (which put ensure_real_signals outermost) — confirm this is intended.
@validate_input
@ensure_real_signals
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run Bilinear RLS adaptation.

    Parameters
    ----------
    input_signal:
        Input signal x[k] (real).
    desired_signal:
        Desired signal d[k] (real).
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns the last regressor and last gain in result.extra.

    Returns
    -------
    OptimizationResult
        outputs:
            Filter output y[k] (a priori).
        errors:
            A priori error e[k] = d[k] - y[k].
        coefficients:
            History of coefficients stored in the base class (length 4).
        error_type:
            "a_priori".
    """
    t0 = perf_counter()

    x = np.asarray(input_signal, dtype=np.float64).ravel()
    d = np.asarray(desired_signal, dtype=np.float64).ravel()

    n_samples = int(x.size)
    outputs = np.zeros(n_samples, dtype=np.float64)
    errors = np.zeros(n_samples, dtype=np.float64)

    # One-sample delays of input and desired signal (zero before k = 0).
    x_prev = 0.0
    d_prev = 0.0

    last_u: Optional[np.ndarray] = None
    last_k: Optional[np.ndarray] = None

    for k in range(n_samples):
        # Bilinear regressor: linear terms plus input/desired cross terms.
        u = np.array(
            [x[k], d_prev, x[k] * d_prev, x_prev * d_prev],
            dtype=np.float64,
        )
        last_u = u

        y_k = float(np.dot(self.w, u))
        outputs[k] = y_k

        e_k = float(d[k] - y_k)
        errors[k] = e_k

        Pu = self.P @ u
        denom = float(self.lambda_factor + (u @ Pu))
        # Guard a vanishing denominator while preserving its sign.
        if abs(denom) < self._safe_eps:
            denom = float(np.sign(denom) * self._safe_eps) if denom != 0.0 else float(self._safe_eps)

        k_gain = Pu / denom
        last_k = k_gain

        # Riccati-style update of the inverse correlation matrix.
        self.P = (self.P - np.outer(k_gain, Pu)) / self.lambda_factor

        self.w = self.w + k_gain * e_k
        self._record_history()  # one coefficient snapshot per sample

        x_prev = float(x[k])
        d_prev = float(d[k])

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[BilinearRLS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "P_last": self.P.copy(),
            "last_regressor": None if last_u is None else last_u.copy(),
            "last_gain": None if last_k is None else last_k.copy(),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Run Bilinear RLS adaptation.
Parameters
input_signal: Input signal x[k] (real). desired_signal: Desired signal d[k] (real). verbose: If True, prints runtime. return_internal_states: If True, returns the last regressor and last gain in result.extra.
Returns
OptimizationResult outputs: Filter output y[k] (a priori). errors: A priori error e[k] = d[k] - y[k]. coefficients: History of coefficients stored in the base class (length 4). error_type: "a_priori".
class ComplexRBF(AdaptiveFilter):
    """
    Complex Radial Basis Function (CRBF) network (complex-valued).

    Implements a complex-valued RBF adaptive model (Algorithm 11.6 - Diniz).
    The model output is computed as:

        f_p(u) = exp( -||u - c_p||^2 / sigma_p^2 )
        y[k]   = w^H f(u_k)

    where:
    - u_k is the input regressor (dimension = input_dim),
    - c_p are complex centers ("vet" in the original code),
    - sigma_p are real spreads,
    - w are complex neuron weights.

    Input handling
    --------------
    This implementation accepts two input formats in `optimize`:

    1) 1D input signal x[k] (shape (N,)):
       A tapped-delay regressor u_k of length `input_dim` is formed internally.

    2) 2D regressor matrix U (shape (N, input_dim)):
       Each row is used directly as u_k.

    Notes
    -----
    - Complex-valued implementation (`supports_complex=True`).
    - The base class `filter_order` is used here as a size indicator (n_neurons-1).
    - `OptimizationResult.coefficients` stores the history of neuron weights `w`.
      Centers and spreads can be returned via `result.extra` when requested.
    """

    supports_complex: bool = True

    def __init__(
        self,
        n_neurons: int,
        input_dim: int,
        ur: float = 0.01,
        uw: float = 0.01,
        us: float = 0.01,
        w_init: Optional[ArrayLike] = None,
        *,
        sigma_init: float = 1.0,
        rng: Optional[np.random.Generator] = None,
    ) -> None:
        """
        Parameters
        ----------
        n_neurons:
            Number of RBF neurons.
        input_dim:
            Dimension of the input regressor u_k.
        ur:
            Step-size for centers update.
        uw:
            Step-size for weights update.
        us:
            Step-size for spread (sigma) update.
        w_init:
            Optional initial neuron weights (length n_neurons). If None, random complex.
        sigma_init:
            Initial spread value used for all neurons (must be > 0).
        rng:
            Optional numpy random generator for reproducible initialization.
        """
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if sigma_init <= 0.0:
            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

        # filter_order used as "generic size indicator" (n_neurons-1 => n_neurons taps)
        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.ur = float(ur)
        self.uw = float(uw)
        self.us = float(us)

        self._rng = rng if rng is not None else np.random.default_rng()

        # weights
        if w_init is None:
            # Random complex initialization: independent real/imag normals.
            w0 = self._rng.standard_normal(n_neurons) + 1j * self._rng.standard_normal(n_neurons)
            self.w = w0.astype(complex)
        else:
            w0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if w0.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
            self.w = w0

        # centers (complex), shape (n_neurons, input_dim)
        self.vet = 0.5 * (
            self._rng.standard_normal((n_neurons, input_dim))
            + 1j * self._rng.standard_normal((n_neurons, input_dim))
        ).astype(complex)

        # spreads (real), shape (n_neurons,)
        self.sigma = np.ones(n_neurons, dtype=float) * float(sigma_init)

        # reset base history with correct initial w
        self.w_history = []
        self._record_history()

    @staticmethod
    def _build_regressors_from_signal(x: np.ndarray, input_dim: int) -> np.ndarray:
        """Build tapped-delay regressors from a 1D signal (N,)->(N,input_dim)."""
        x = np.asarray(x, dtype=complex).ravel()
        n = int(x.size)
        m = int(input_dim - 1)

        # Zero-pad so the first rows see zeros for pre-signal samples.
        x_padded = np.zeros(n + m, dtype=complex)
        x_padded[m:] = x

        # Each row k holds [x[k], x[k-1], ..., x[k-m]] (most recent first).
        U = np.zeros((n, input_dim), dtype=complex)
        for k in range(n):
            U[k, :] = x_padded[k : k + input_dim][::-1]
        return U

    @staticmethod
    def _squared_distance_complex(u: np.ndarray, centers: np.ndarray) -> np.ndarray:
        """
        Compute ||u - c_p||^2 for each center row.
        u: (input_dim,)
        centers: (n_neurons, input_dim)
        returns: (n_neurons,)
        """
        diff = u[None, :] - centers
        # squared Euclidean distance in C^d: sum(Re^2 + Im^2)
        return np.sum(diff.real**2 + diff.imag**2, axis=1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
        *,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Run CRBF adaptation.

        Parameters
        ----------
        input_signal:
            Either:
            - 1D signal x[k] with shape (N,), or
            - regressor matrix U with shape (N, input_dim).
        desired_signal:
            Desired signal d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns final centers/spreads and last activation vector in result.extra.
        safe_eps:
            Small epsilon to protect denominators (sigma and other divisions).

        Returns
        -------
        OptimizationResult
            outputs:
                Model output y[k].
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of neuron weights w[k] (shape (N+1, n_neurons) in base history).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["centers_last"]:
            Final centers array (n_neurons, input_dim).
        extra["sigma_last"]:
            Final spreads array (n_neurons,).
        extra["last_activation"]:
            Last activation vector f(u_k) (n_neurons,).
        extra["last_regressor"]:
            Last regressor u_k (input_dim,).
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal)
        d = np.asarray(desired_signal, dtype=complex).ravel()

        # Build regressors
        if x_in.ndim == 1:
            U = self._build_regressors_from_signal(x_in, self.input_dim)
        elif x_in.ndim == 2:
            U = np.asarray(x_in, dtype=complex)
            if U.shape[1] != self.input_dim:
                raise ValueError(
                    f"input_signal has shape {U.shape}, expected second dim input_dim={self.input_dim}."
                )
        else:
            raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

        N = int(U.shape[0])
        if d.size != N:
            raise ValueError(f"Inconsistent lengths: regressors({N}) != desired({d.size}).")

        outputs = np.zeros(N, dtype=complex)
        errors = np.zeros(N, dtype=complex)

        last_f: Optional[np.ndarray] = None
        last_u: Optional[np.ndarray] = None

        for k in range(N):
            u = U[k, :]
            last_u = u

            # activations
            dis_sq = self._squared_distance_complex(u, self.vet)
            # Clamp sigma^2 away from zero before dividing.
            sigma_sq = np.maximum(self.sigma**2, float(safe_eps))
            f = np.exp(-dis_sq / sigma_sq)
            last_f = f

            # output and error (a priori)
            y_k = complex(np.vdot(self.w, f))  # conj(w) @ f
            outputs[k] = y_k
            e_k = d[k] - y_k
            errors[k] = e_k

            # weight update (kept as in your code: 2*uw*e*f)
            self.w = self.w + (2.0 * self.uw) * e_k * f

            # sigma update (kept structurally similar, with protections)
            # NOTE(review): gradient couples only Re(e)Re(w) + Im(e)Im(w)
            # (split-complex form) — confirm against the book's Alg. 11.6.
            denom_sigma = np.maximum(self.sigma**3, float(safe_eps))
            grad_sigma = (
                (2.0 * self.us)
                * f
                * (e_k.real * self.w.real + e_k.imag * self.w.imag)
                * dis_sq
                / denom_sigma
            )
            self.sigma = self.sigma + grad_sigma
            # Keep spreads strictly positive.
            self.sigma = np.maximum(self.sigma, float(safe_eps))

            # centers update (vectorized over neurons; same intent as your loop)
            # Real and imaginary center parts are adapted independently,
            # each driven by the matching real/imag error-weight product.
            denom_c = np.maximum(self.sigma**2, float(safe_eps))
            term = (e_k.real * self.w.real)[:, None] * (u - self.vet).real + 1j * (
                (e_k.imag * self.w.imag)[:, None] * (u - self.vet).imag
            )
            self.vet = self.vet + (2.0 * self.ur) * (f[:, None] * term) / denom_c[:, None]

            # One weight snapshot per processed sample (N+1 rows incl. initial).
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[ComplexRBF] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "centers_last": self.vet.copy(),
                "sigma_last": self.sigma.copy(),
                "last_activation": None if last_f is None else np.asarray(last_f).copy(),
                "last_regressor": None if last_u is None else np.asarray(last_u).copy(),
                "input_dim": int(self.input_dim),
                "n_neurons": int(self.n_neurons),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Complex Radial Basis Function (CRBF) network (complex-valued).
Implements a complex-valued RBF adaptive model (Algorithm 11.6 - Diniz). The model output is computed as:
f_p(u) = exp( -||u - c_p||^2 / sigma_p^2 )
y[k] = w^H f(u_k)
where:
- u_k is the input regressor (dimension = input_dim),
- c_p are complex centers ("vet" in the original code),
- sigma_p are real spreads,
- w are complex neuron weights.
Input handling
This implementation accepts two input formats in optimize:
1) 1D input signal x[k] (shape (N,)):
A tapped-delay regressor u_k of length input_dim is formed internally.
2) 2D regressor matrix U (shape (N, input_dim)): Each row is used directly as u_k.
Notes
- Complex-valued implementation (`supports_complex=True`).
- The base class `filter_order` is used here as a size indicator (n_neurons - 1).
- `OptimizationResult.coefficients` stores the history of neuron weights `w`.
- Centers and spreads can be returned via `result.extra` when requested.
    def __init__(
        self,
        n_neurons: int,
        input_dim: int,
        ur: float = 0.01,
        uw: float = 0.01,
        us: float = 0.01,
        w_init: Optional[ArrayLike] = None,
        *,
        sigma_init: float = 1.0,
        rng: Optional[np.random.Generator] = None,
    ) -> None:
        """
        Initialize the complex RBF network state (weights, centers, spreads).

        Parameters
        ----------
        n_neurons:
            Number of RBF neurons.
        input_dim:
            Dimension of the input regressor u_k.
        ur:
            Step-size for centers update.
        uw:
            Step-size for weights update.
        us:
            Step-size for spread (sigma) update.
        w_init:
            Optional initial neuron weights (length n_neurons). If None, random complex.
        sigma_init:
            Initial spread value used for all neurons (must be > 0).
        rng:
            Optional numpy random generator for reproducible initialization.

        Raises
        ------
        ValueError
            If n_neurons <= 0, input_dim <= 0, sigma_init <= 0, or w_init has
            the wrong length.
        """
        # Normalize sizes to plain ints so subsequent shape arithmetic is predictable.
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if sigma_init <= 0.0:
            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

        # filter_order is used only as a generic size indicator here:
        # n_neurons - 1 => n_neurons "taps" in the base-class bookkeeping.
        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.ur = float(ur)
        self.uw = float(uw)
        self.us = float(us)

        self._rng = rng if rng is not None else np.random.default_rng()

        # Neuron output weights (complex, length n_neurons).
        if w_init is None:
            # Random complex initialization: independent real and imaginary normals.
            w0 = self._rng.standard_normal(n_neurons) + 1j * self._rng.standard_normal(n_neurons)
            self.w = w0.astype(complex)
        else:
            w0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if w0.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
            self.w = w0

        # Centers (complex), shape (n_neurons, input_dim); the 0.5 factor keeps
        # the initial centers closer to the origin.
        self.vet = 0.5 * (
            self._rng.standard_normal((n_neurons, input_dim))
            + 1j * self._rng.standard_normal((n_neurons, input_dim))
        ).astype(complex)

        # Spreads (real, strictly positive), shape (n_neurons,).
        self.sigma = np.ones(n_neurons, dtype=float) * float(sigma_init)

        # Reset the base-class history so it starts from the correct initial w.
        self.w_history = []
        self._record_history()
Parameters
n_neurons: Number of RBF neurons. input_dim: Dimension of the input regressor u_k. ur: Step-size for centers update. uw: Step-size for weights update. us: Step-size for spread (sigma) update. w_init: Optional initial neuron weights (length n_neurons). If None, random complex. sigma_init: Initial spread value used for all neurons (must be > 0). rng: Optional numpy random generator for reproducible initialization.
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
        *,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Run CRBF adaptation.

        Parameters
        ----------
        input_signal:
            Either:
              - 1D signal x[k] with shape (N,), or
              - regressor matrix U with shape (N, input_dim).
        desired_signal:
            Desired signal d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns final centers/spreads and last activation vector in result.extra.
        safe_eps:
            Small epsilon to protect denominators (sigma and other divisions).

        Returns
        -------
        OptimizationResult
            outputs:
                Model output y[k].
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of neuron weights w[k] (shape (N+1, n_neurons) in base history).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["centers_last"]:
            Final centers array (n_neurons, input_dim).
        extra["sigma_last"]:
            Final spreads array (n_neurons,).
        extra["last_activation"]:
            Last activation vector f(u_k) (n_neurons,).
        extra["last_regressor"]:
            Last regressor u_k (input_dim,).
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal)
        d = np.asarray(desired_signal, dtype=complex).ravel()

        # Build the regressor matrix U (one row per iteration k).
        if x_in.ndim == 1:
            # 1D signal: tapped-delay regressors of length input_dim are built internally.
            U = self._build_regressors_from_signal(x_in, self.input_dim)
        elif x_in.ndim == 2:
            U = np.asarray(x_in, dtype=complex)
            if U.shape[1] != self.input_dim:
                raise ValueError(
                    f"input_signal has shape {U.shape}, expected second dim input_dim={self.input_dim}."
                )
        else:
            raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

        N = int(U.shape[0])
        if d.size != N:
            raise ValueError(f"Inconsistent lengths: regressors({N}) != desired({d.size}).")

        outputs = np.zeros(N, dtype=complex)
        errors = np.zeros(N, dtype=complex)

        # Kept so the final activation/regressor can be reported in `extra`.
        last_f: Optional[np.ndarray] = None
        last_u: Optional[np.ndarray] = None

        for k in range(N):
            u = U[k, :]
            last_u = u

            # Gaussian activations f_p = exp(-||u - c_p||^2 / sigma_p^2),
            # with sigma^2 clamped away from zero by safe_eps.
            dis_sq = self._squared_distance_complex(u, self.vet)
            sigma_sq = np.maximum(self.sigma**2, float(safe_eps))
            f = np.exp(-dis_sq / sigma_sq)
            last_f = f

            # Output y = w^H f and a priori error (np.vdot conjugates its first argument).
            y_k = complex(np.vdot(self.w, f))  # conj(w) @ f
            outputs[k] = y_k
            e_k = d[k] - y_k
            errors[k] = e_k

            # Weight update (gradient-style rule kept from the original code: 2*uw*e*f).
            self.w = self.w + (2.0 * self.uw) * e_k * f

            # Spread (sigma) update, structurally as in the original but with a
            # protected denominator; sigma is then clamped to stay positive.
            denom_sigma = np.maximum(self.sigma**3, float(safe_eps))
            grad_sigma = (
                (2.0 * self.us)
                * f
                * (e_k.real * self.w.real + e_k.imag * self.w.imag)
                * dis_sq
                / denom_sigma
            )
            self.sigma = self.sigma + grad_sigma
            self.sigma = np.maximum(self.sigma, float(safe_eps))

            # Centers update, vectorized over neurons; real and imaginary parts
            # are adapted separately, each driven by the matching error/weight part.
            denom_c = np.maximum(self.sigma**2, float(safe_eps))
            term = (e_k.real * self.w.real)[:, None] * (u - self.vet).real + 1j * (
                (e_k.imag * self.w.imag)[:, None] * (u - self.vet).imag
            )
            self.vet = self.vet + (2.0 * self.ur) * (f[:, None] * term) / denom_c[:, None]

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[ComplexRBF] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "centers_last": self.vet.copy(),
                "sigma_last": self.sigma.copy(),
                "last_activation": None if last_f is None else np.asarray(last_f).copy(),
                "last_regressor": None if last_u is None else np.asarray(last_u).copy(),
                "input_dim": int(self.input_dim),
                "n_neurons": int(self.n_neurons),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Run CRBF adaptation.
Parameters
input_signal: Either: - 1D signal x[k] with shape (N,), or - regressor matrix U with shape (N, input_dim). desired_signal: Desired signal d[k], shape (N,). verbose: If True, prints runtime. return_internal_states: If True, returns final centers/spreads and last activation vector in result.extra. safe_eps: Small epsilon to protect denominators (sigma and other divisions).
Returns
OptimizationResult outputs: Model output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: History of neuron weights w[k] (shape (N+1, n_neurons) in base history). error_type: "a_priori".
Extra (when return_internal_states=True)
extra["centers_last"]: Final centers array (n_neurons, input_dim). extra["sigma_last"]: Final spreads array (n_neurons,). extra["last_activation"]: Last activation vector f(u_k) (n_neurons,). extra["last_regressor"]: Last regressor u_k (input_dim,).
class MultilayerPerceptron(AdaptiveFilter):
    """
    Multilayer Perceptron (MLP) adaptive model with momentum (real-valued).

    This is a 2-hidden-layer MLP adapted online using a gradient update with momentum.
    The network is:

        y1 = act(w1 u - b1)
        y2 = act(w2 y1 - b2)
        y = w3^T y2 - b3

    where `act` is either tanh or sigmoid.

    Input handling
    --------------
    This implementation accepts two input formats in `optimize`:

    1) 2D input matrix X with shape (N, input_dim):
       Each row is used directly as the regressor u[k].

    2) 1D input signal x[k] with shape (N,):
       A 3-dimensional regressor is formed internally as:
           u[k] = [x[k], d[k-1], x[k-1]]
       In this mode, `input_dim` must be 3.

    Notes
    -----
    - Real-valued only: enforced by `ensure_real_signals`.
    - The base class `filter_order` is used only as a size indicator here.
    - `OptimizationResult.coefficients` stores a proxy coefficient history (w3).
      Full parameter trajectories can be returned in `result.extra` when requested.
    """

    # Real-valued model: complex signals are rejected by the decorator on optimize.
    supports_complex: bool = False

    def __init__(
        self,
        n_neurons: int = 10,
        input_dim: int = 3,
        step_size: float = 0.01,
        momentum: float = 0.9,
        activation: str = "tanh",
        w_init: Optional[ArrayLike] = None,
        *,
        rng: Optional[np.random.Generator] = None,
    ) -> None:
        """
        Parameters
        ----------
        n_neurons:
            Number of neurons in each hidden layer.
        input_dim:
            Dimension of the input regressor u[k].
            If `optimize` is called with a 1D signal, input_dim must be 3.
        step_size:
            Gradient step-size (mu).
        momentum:
            Momentum factor in [0, 1). Typical values: 0.0 to 0.9.
        activation:
            Activation function: "tanh" or "sigmoid".
        w_init:
            Optional initialization for the output-layer weights w3 (length n_neurons).
            If None, Xavier/Glorot initialization is used for all weights.
        rng:
            Optional numpy random generator for reproducible initialization.
        """
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if not (0.0 <= float(momentum) < 1.0):
            raise ValueError(f"momentum must satisfy 0 <= momentum < 1. Got {momentum}.")

        # filter_order is used purely as a size indicator for the base class.
        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.step_size = float(step_size)
        self.momentum = float(momentum)

        if activation == "tanh":
            self.act_func = _tanh
            self.act_deriv = _dtanh
        elif activation == "sigmoid":
            self.act_func = _sigmoid
            self.act_deriv = _dsigmoid
        else:
            raise ValueError("activation must be 'tanh' or 'sigmoid'.")

        self._rng = rng if rng is not None else np.random.default_rng()

        # Xavier/Glorot uniform bounds: sqrt(6 / (fan_in + fan_out)) per layer.
        limit_w1 = float(np.sqrt(6.0 / (input_dim + n_neurons)))
        limit_w2 = float(np.sqrt(6.0 / (n_neurons + n_neurons)))
        limit_w3 = float(np.sqrt(6.0 / (n_neurons + 1)))

        self.w1 = self._rng.uniform(-limit_w1, limit_w1, (n_neurons, input_dim)).astype(np.float64)
        self.w2 = self._rng.uniform(-limit_w2, limit_w2, (n_neurons, n_neurons)).astype(np.float64)
        self.w3 = self._rng.uniform(-limit_w3, limit_w3, (n_neurons,)).astype(np.float64)

        # Optional user override of the output-layer weights only.
        if w_init is not None:
            w3_0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w3_0.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {w3_0.size}.")
            self.w3 = w3_0

        # Biases enter the model with a minus sign (v = W x - b).
        self.b1 = np.zeros(n_neurons, dtype=np.float64)
        self.b2 = np.zeros(n_neurons, dtype=np.float64)
        self.b3 = 0.0

        # Momentum buffers holding the previous update deltas.
        self.prev_dw1 = np.zeros_like(self.w1)
        self.prev_dw2 = np.zeros_like(self.w2)
        self.prev_dw3 = np.zeros_like(self.w3)
        self.prev_db1 = np.zeros_like(self.b1)
        self.prev_db2 = np.zeros_like(self.b2)
        self.prev_db3 = 0.0

        # The base-class coefficient vector mirrors w3 (proxy history).
        self.w = self.w3.copy()
        self.w_history = []
        self._record_history()

    @staticmethod
    def _as_regressor_matrix(
        x_in: np.ndarray, d_in: np.ndarray, input_dim: int
    ) -> Tuple[np.ndarray, bool]:
        """
        Return (U, is_multidim).

        - If x_in is 2D: U = x_in
        - If x_in is 1D: builds U[k]=[x[k], d[k-1], x[k-1]] and requires input_dim=3
        """
        x_in = np.asarray(x_in, dtype=np.float64)
        d_in = np.asarray(d_in, dtype=np.float64).ravel()

        if x_in.ndim == 2:
            if x_in.shape[0] != d_in.size:
                raise ValueError(f"Shape mismatch: input({x_in.shape[0]}) and desired({d_in.size}).")
            if x_in.shape[1] != input_dim:
                raise ValueError(f"input_signal second dim must be input_dim={input_dim}. Got {x_in.shape}.")
            return x_in.astype(np.float64, copy=False), True

        if x_in.ndim == 1:
            if input_dim != 3:
                raise ValueError(
                    "When input_signal is 1D, this implementation uses u[k]=[x[k], d[k-1], x[k-1]] "
                    "so input_dim must be 3."
                )
            if x_in.size != d_in.size:
                raise ValueError(f"Shape mismatch: input({x_in.size}) and desired({d_in.size}).")

            N = int(x_in.size)
            U = np.zeros((N, 3), dtype=np.float64)
            # Lagged values start at zero (k-1 samples before the signal begins).
            x_prev = 0.0
            d_prev = 0.0
            for k in range(N):
                U[k, :] = np.array([x_in[k], d_prev, x_prev], dtype=np.float64)
                x_prev = float(x_in[k])
                d_prev = float(d_in[k])
            return U, False

        raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run MLP online adaptation with momentum.

        Parameters
        ----------
        input_signal:
            Either:
              - regressor matrix X with shape (N, input_dim), or
              - 1D signal x[k] with shape (N,) (uses u[k]=[x[k], d[k-1], x[k-1]]; requires input_dim=3).
        desired_signal:
            Desired signal d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns parameter histories in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                Proxy coefficient history (w3) stacked from base history.
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["w1_hist"], extra["w2_hist"], extra["w3_hist"]:
            Parameter histories (each item is a snapshot per iteration).
        extra["b1_hist"], extra["b2_hist"], extra["b3_hist"]:
            Bias histories.
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal, dtype=np.float64)
        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()

        U, _ = self._as_regressor_matrix(x_in, d_in, self.input_dim)
        N = int(U.shape[0])

        outputs = np.zeros(N, dtype=np.float64)
        errors = np.zeros(N, dtype=np.float64)

        w1_hist: List[np.ndarray] = []
        w2_hist: List[np.ndarray] = []
        w3_hist: List[np.ndarray] = []
        b1_hist: List[np.ndarray] = []
        b2_hist: List[np.ndarray] = []
        b3_hist: List[float] = []

        for k in range(N):
            u = U[k, :]

            # Forward pass through the two hidden layers and the linear output.
            v1 = (self.w1 @ u) - self.b1
            y1 = self.act_func(v1)

            v2 = (self.w2 @ y1) - self.b2
            y2 = self.act_func(v2)

            y_k = float(np.dot(y2, self.w3) - self.b3)
            outputs[k] = y_k
            e_k = float(d_in[k] - y_k)
            errors[k] = e_k

            # Backpropagated deltas for hidden layer 2, then hidden layer 1.
            er_hid2 = e_k * self.w3 * self.act_deriv(v2)
            er_hid1 = (self.w2.T @ er_hid2) * self.act_deriv(v1)

            # Gradient updates with momentum; each delta is stored so the next
            # iteration can add momentum * previous delta.
            dw3 = (2.0 * self.step_size) * e_k * y2
            self.w3 = self.w3 + dw3 + self.momentum * self.prev_dw3
            self.prev_dw3 = dw3

            # Bias deltas are negated because biases enter as v = W x - b.
            db3 = (-2.0 * self.step_size) * e_k
            self.b3 = float(self.b3 + db3 + self.momentum * self.prev_db3)
            self.prev_db3 = db3

            dw2 = (2.0 * self.step_size) * np.outer(er_hid2, y1)
            self.w2 = self.w2 + dw2 + self.momentum * self.prev_dw2
            self.prev_dw2 = dw2

            db2 = (-2.0 * self.step_size) * er_hid2
            self.b2 = self.b2 + db2 + self.momentum * self.prev_db2
            self.prev_db2 = db2

            dw1 = (2.0 * self.step_size) * np.outer(er_hid1, u)
            self.w1 = self.w1 + dw1 + self.momentum * self.prev_dw1
            self.prev_dw1 = dw1

            db1 = (-2.0 * self.step_size) * er_hid1
            self.b1 = self.b1 + db1 + self.momentum * self.prev_db1
            self.prev_db1 = db1

            # Keep the base-class proxy coefficient vector in sync with w3.
            self.w = self.w3.copy()
            self._record_history()

            if return_internal_states:
                w1_hist.append(self.w1.copy())
                w2_hist.append(self.w2.copy())
                w3_hist.append(self.w3.copy())
                b1_hist.append(self.b1.copy())
                b2_hist.append(self.b2.copy())
                b3_hist.append(float(self.b3))

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[MultilayerPerceptron] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "w1_hist": w1_hist,
                "w2_hist": w2_hist,
                "w3_hist": w3_hist,
                "b1_hist": b1_hist,
                "b2_hist": b2_hist,
                "b3_hist": b3_hist,
                "activation": "tanh" if self.act_func is _tanh else "sigmoid",
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Multilayer Perceptron (MLP) adaptive model with momentum (real-valued).
This is a 2-hidden-layer MLP adapted online using a gradient update with momentum. The network is:
y1 = act(w1 u - b1)
y2 = act(w2 y1 - b2)
y = w3^T y2 - b3
where act is either tanh or sigmoid.
Input handling
This implementation accepts two input formats in optimize:
1) 2D input matrix X with shape (N, input_dim): Each row is used directly as the regressor u[k].
2) 1D input signal x[k] with shape (N,):
A 3-dimensional regressor is formed internally as:
u[k] = [x[k], d[k-1], x[k-1]]
In this mode, input_dim must be 3.
Notes
- Real-valued only: enforced by `ensure_real_signals`.
- The base class `filter_order` is used only as a size indicator here.
- `OptimizationResult.coefficients` stores a proxy coefficient history (w3).
- Full parameter trajectories can be returned in `result.extra` when requested.
84 def __init__( 85 self, 86 n_neurons: int = 10, 87 input_dim: int = 3, 88 step_size: float = 0.01, 89 momentum: float = 0.9, 90 activation: str = "tanh", 91 w_init: Optional[ArrayLike] = None, 92 *, 93 rng: Optional[np.random.Generator] = None, 94 ) -> None: 95 """ 96 Parameters 97 ---------- 98 n_neurons: 99 Number of neurons in each hidden layer. 100 input_dim: 101 Dimension of the input regressor u[k]. 102 If `optimize` is called with a 1D signal, input_dim must be 3. 103 step_size: 104 Gradient step-size (mu). 105 momentum: 106 Momentum factor in [0, 1). Typical values: 0.0 to 0.9. 107 activation: 108 Activation function: "tanh" or "sigmoid". 109 w_init: 110 Optional initialization for the output-layer weights w3 (length n_neurons). 111 If None, Xavier/Glorot initialization is used for all weights. 112 rng: 113 Optional numpy random generator for reproducible initialization. 114 """ 115 n_neurons = int(n_neurons) 116 input_dim = int(input_dim) 117 if n_neurons <= 0: 118 raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.") 119 if input_dim <= 0: 120 raise ValueError(f"input_dim must be > 0. Got {input_dim}.") 121 if not (0.0 <= float(momentum) < 1.0): 122 raise ValueError(f"momentum must satisfy 0 <= momentum < 1. 
Got {momentum}.") 123 124 super().__init__(filter_order=n_neurons - 1, w_init=None) 125 126 self.n_neurons = n_neurons 127 self.input_dim = input_dim 128 self.step_size = float(step_size) 129 self.momentum = float(momentum) 130 131 if activation == "tanh": 132 self.act_func = _tanh 133 self.act_deriv = _dtanh 134 elif activation == "sigmoid": 135 self.act_func = _sigmoid 136 self.act_deriv = _dsigmoid 137 else: 138 raise ValueError("activation must be 'tanh' or 'sigmoid'.") 139 140 self._rng = rng if rng is not None else np.random.default_rng() 141 142 limit_w1 = float(np.sqrt(6.0 / (input_dim + n_neurons))) 143 limit_w2 = float(np.sqrt(6.0 / (n_neurons + n_neurons))) 144 limit_w3 = float(np.sqrt(6.0 / (n_neurons + 1))) 145 146 self.w1 = self._rng.uniform(-limit_w1, limit_w1, (n_neurons, input_dim)).astype(np.float64) 147 self.w2 = self._rng.uniform(-limit_w2, limit_w2, (n_neurons, n_neurons)).astype(np.float64) 148 self.w3 = self._rng.uniform(-limit_w3, limit_w3, (n_neurons,)).astype(np.float64) 149 150 if w_init is not None: 151 w3_0 = np.asarray(w_init, dtype=np.float64).reshape(-1) 152 if w3_0.size != n_neurons: 153 raise ValueError(f"w_init must have length {n_neurons}, got {w3_0.size}.") 154 self.w3 = w3_0 155 156 self.b1 = np.zeros(n_neurons, dtype=np.float64) 157 self.b2 = np.zeros(n_neurons, dtype=np.float64) 158 self.b3 = 0.0 159 160 self.prev_dw1 = np.zeros_like(self.w1) 161 self.prev_dw2 = np.zeros_like(self.w2) 162 self.prev_dw3 = np.zeros_like(self.w3) 163 self.prev_db1 = np.zeros_like(self.b1) 164 self.prev_db2 = np.zeros_like(self.b2) 165 self.prev_db3 = 0.0 166 167 self.w = self.w3.copy() 168 self.w_history = [] 169 self._record_history()
Parameters
n_neurons:
Number of neurons in each hidden layer.
input_dim:
Dimension of the input regressor u[k].
If optimize is called with a 1D signal, input_dim must be 3.
step_size:
Gradient step-size (mu).
momentum:
Momentum factor in [0, 1). Typical values: 0.0 to 0.9.
activation:
Activation function: "tanh" or "sigmoid".
w_init:
Optional initialization for the output-layer weights w3 (length n_neurons).
If None, Xavier/Glorot initialization is used for all weights.
rng:
Optional numpy random generator for reproducible initialization.
    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run MLP online adaptation with momentum.

        Parameters
        ----------
        input_signal:
            Either:
              - regressor matrix X with shape (N, input_dim), or
              - 1D signal x[k] with shape (N,) (uses u[k]=[x[k], d[k-1], x[k-1]]; requires input_dim=3).
        desired_signal:
            Desired signal d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns parameter histories in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                Proxy coefficient history (w3) stacked from base history.
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["w1_hist"], extra["w2_hist"], extra["w3_hist"]:
            Parameter histories (each item is a snapshot per iteration).
        extra["b1_hist"], extra["b2_hist"], extra["b3_hist"]:
            Bias histories.
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal, dtype=np.float64)
        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()

        U, _ = self._as_regressor_matrix(x_in, d_in, self.input_dim)
        N = int(U.shape[0])

        outputs = np.zeros(N, dtype=np.float64)
        errors = np.zeros(N, dtype=np.float64)

        w1_hist: List[np.ndarray] = []
        w2_hist: List[np.ndarray] = []
        w3_hist: List[np.ndarray] = []
        b1_hist: List[np.ndarray] = []
        b2_hist: List[np.ndarray] = []
        b3_hist: List[float] = []

        for k in range(N):
            u = U[k, :]

            # Forward pass: two activated hidden layers, linear output.
            v1 = (self.w1 @ u) - self.b1
            y1 = self.act_func(v1)

            v2 = (self.w2 @ y1) - self.b2
            y2 = self.act_func(v2)

            y_k = float(np.dot(y2, self.w3) - self.b3)
            outputs[k] = y_k
            e_k = float(d_in[k] - y_k)
            errors[k] = e_k

            # Backpropagated deltas (layer 2 first, then layer 1).
            er_hid2 = e_k * self.w3 * self.act_deriv(v2)
            er_hid1 = (self.w2.T @ er_hid2) * self.act_deriv(v1)

            # Momentum updates: apply current delta plus momentum * previous delta,
            # then remember the current delta for the next iteration.
            dw3 = (2.0 * self.step_size) * e_k * y2
            self.w3 = self.w3 + dw3 + self.momentum * self.prev_dw3
            self.prev_dw3 = dw3

            # Bias deltas are negated because biases enter as v = W x - b.
            db3 = (-2.0 * self.step_size) * e_k
            self.b3 = float(self.b3 + db3 + self.momentum * self.prev_db3)
            self.prev_db3 = db3

            dw2 = (2.0 * self.step_size) * np.outer(er_hid2, y1)
            self.w2 = self.w2 + dw2 + self.momentum * self.prev_dw2
            self.prev_dw2 = dw2

            db2 = (-2.0 * self.step_size) * er_hid2
            self.b2 = self.b2 + db2 + self.momentum * self.prev_db2
            self.prev_db2 = db2

            dw1 = (2.0 * self.step_size) * np.outer(er_hid1, u)
            self.w1 = self.w1 + dw1 + self.momentum * self.prev_dw1
            self.prev_dw1 = dw1

            db1 = (-2.0 * self.step_size) * er_hid1
            self.b1 = self.b1 + db1 + self.momentum * self.prev_db1
            self.prev_db1 = db1

            # Mirror w3 into the base-class proxy coefficient vector.
            self.w = self.w3.copy()
            self._record_history()

            if return_internal_states:
                w1_hist.append(self.w1.copy())
                w2_hist.append(self.w2.copy())
                w3_hist.append(self.w3.copy())
                b1_hist.append(self.b1.copy())
                b2_hist.append(self.b2.copy())
                b3_hist.append(float(self.b3))

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[MultilayerPerceptron] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "w1_hist": w1_hist,
                "w2_hist": w2_hist,
                "w3_hist": w3_hist,
                "b1_hist": b1_hist,
                "b2_hist": b2_hist,
                "b3_hist": b3_hist,
                "activation": "tanh" if self.act_func is _tanh else "sigmoid",
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Run MLP online adaptation with momentum.
Parameters
input_signal:
Either:
- regressor matrix X with shape (N, input_dim), or
- 1D signal x[k] with shape (N,) (uses u[k]=[x[k], d[k-1], x[k-1]]; requires input_dim=3).
desired_signal:
Desired signal d[k], shape (N,).
verbose:
If True, prints runtime.
return_internal_states:
If True, returns parameter histories in result.extra.
Returns
OptimizationResult outputs: Estimated output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: Proxy coefficient history (w3) stacked from base history. error_type: "a_priori".
Extra (when return_internal_states=True)
extra["w1_hist"], extra["w2_hist"], extra["w3_hist"]: Parameter histories (each item is a snapshot per iteration). extra["b1_hist"], extra["b2_hist"], extra["b3_hist"]: Bias histories.
class RBF(AdaptiveFilter):
    """
    Radial Basis Function (RBF) adaptive model (real-valued).

    Implements Algorithm 11.5 (Diniz) with online adaptation of:
      - output weights w (length n_neurons),
      - centers/centroids (reference vectors) `vet` (shape n_neurons x input_dim),
      - spreads `sigma` (length n_neurons).

    Model (common form)
    -------------------
    For regressor u[k] (shape input_dim,), the i-th basis function is:

        phi_i(u[k]) = exp( -||u[k] - c_i||^2 / sigma_i^2 )

    Output:
        y[k] = sum_i w_i * phi_i(u[k])

    Input handling
    --------------
    `optimize` accepts:
      1) input_signal as a regressor matrix with shape (N, input_dim), where each row is u[k].
      2) input_signal as a 1D signal x[k] with shape (N,). In this case, regressors are built
         as tapped-delay vectors of length input_dim:
             u[k] = [x[k], x[k-1], ..., x[k-input_dim+1]]

    Notes
    -----
    - Real-valued only: enforced by `ensure_real_signals`.
    - The base class coefficient vector `self.w` is used for the neuron output weights.
      Coefficient history in `OptimizationResult.coefficients` corresponds to w over time.
    """

    # Real-valued model: complex signals are rejected by the decorator on optimize.
    supports_complex: bool = False

    def __init__(
        self,
        n_neurons: int,
        input_dim: int,
        ur: float = 0.01,
        uw: float = 0.01,
        us: float = 0.01,
        w_init: Optional[ArrayLike] = None,
        *,
        sigma_init: float = 1.0,
        centers_init_scale: float = 0.5,
        rng: Optional[np.random.Generator] = None,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        n_neurons:
            Number of RBF neurons (basis functions).
        input_dim:
            Dimension of the regressor u[k]. If input_signal is 1D, this is the tap length.
        ur:
            Step-size for center updates.
        uw:
            Step-size for output weight updates.
        us:
            Step-size for spread (sigma) updates.
        w_init:
            Optional initialization for output weights w (length n_neurons). If None, random normal.
        sigma_init:
            Initial sigma value for all neurons.
        centers_init_scale:
            Scale factor used for random initialization of centers.
        rng:
            Optional numpy random generator for reproducible initialization.
        safe_eps:
            Small epsilon to protect denominators (sigma^2, sigma^3).
        """
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if float(sigma_init) <= 0.0:
            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

        # filter_order serves only as a size indicator for the base class.
        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.ur = float(ur)
        self.uw = float(uw)
        self.us = float(us)

        self._safe_eps = float(safe_eps)
        self._rng = rng if rng is not None else np.random.default_rng()

        # Output weights (real, length n_neurons).
        if w_init is None:
            self.w = self._rng.standard_normal(n_neurons).astype(np.float64)
        else:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.")
            self.w = w0

        # Centers (real), shape (n_neurons, input_dim), scaled random init.
        self.vet = (float(centers_init_scale) * self._rng.standard_normal((n_neurons, input_dim))).astype(
            np.float64
        )
        # Spreads (real), shape (n_neurons,).
        self.sigma = np.ones(n_neurons, dtype=np.float64) * float(sigma_init)

        # Reset base-class history so it starts from the initial w above.
        self.w_history = []
        self._record_history()

    @staticmethod
    def _build_regressors_1d(x: np.ndarray, input_dim: int) -> np.ndarray:
        """Build tapped-delay regressors u[k]=[x[k], x[k-1], ..., x[k-input_dim+1]]."""
        x = np.asarray(x, dtype=np.float64).ravel()
        N = int(x.size)
        m = int(input_dim) - 1
        # Zero-pad the front so early regressors use zeros for missing samples.
        x_pad = np.zeros(N + m, dtype=np.float64)
        x_pad[m:] = x
        # Reverse each window so the newest sample comes first.
        return np.array([x_pad[k : k + m + 1][::-1] for k in range(N)], dtype=np.float64)

    @staticmethod
    def _as_regressor_matrix(x_in: np.ndarray, input_dim: int) -> Tuple[np.ndarray, int]:
        """Return (U, N) from either (N,input_dim) or (N,) input."""
        x_in = np.asarray(x_in, dtype=np.float64)
        if x_in.ndim == 2:
            if x_in.shape[1] != input_dim:
                raise ValueError(f"input_signal must have shape (N,{input_dim}). Got {x_in.shape}.")
            return x_in.astype(np.float64, copy=False), int(x_in.shape[0])
        if x_in.ndim == 1:
            U = RBF._build_regressors_1d(x_in, input_dim=input_dim)
            return U, int(U.shape[0])
        raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run RBF online adaptation.

        Parameters
        ----------
        input_signal:
            Either:
              - regressor matrix with shape (N, input_dim), or
              - 1D signal x[k] with shape (N,) (tapped-delay regressors are built internally).
        desired_signal:
            Desired output d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns final centers/spreads and selected debug info in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output y[k].
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of neuron output weights w (stacked from base history).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["centers_last"]:
            Final centers array `vet` (n_neurons, input_dim).
        extra["sigma_last"]:
            Final sigma vector (n_neurons,).
        extra["last_phi"]:
            Last basis-function activation vector phi(u[k]) (n_neurons,).
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal, dtype=np.float64)
        d_in = np.asarray(desired_signal, dtype=np.float64).ravel()

        U, N = self._as_regressor_matrix(x_in, input_dim=self.input_dim)
        if d_in.size != N:
            raise ValueError(f"Shape mismatch: input({N}) and desired({d_in.size}).")

        outputs = np.zeros(N, dtype=np.float64)
        errors = np.zeros(N, dtype=np.float64)

        last_phi: Optional[np.ndarray] = None

        for k in range(N):
            u = U[k, :]

            # Squared Euclidean distances from u to every center.
            diff = u[None, :] - self.vet
            dis_sq = np.sum(diff * diff, axis=1)

            # Gaussian activations; sigma^2 is protected by _safe_eps.
            sigma_sq = (self.sigma * self.sigma) + self._safe_eps
            phi = np.exp(-dis_sq / sigma_sq)
            last_phi = phi

            y_k = float(np.dot(self.w, phi))
            outputs[k] = y_k

            # A priori error (computed before any parameter update).
            e_k = float(d_in[k] - y_k)
            errors[k] = e_k

            # Output-weight update.
            self.w = self.w + (2.0 * self.uw) * e_k * phi

            # Spread update: denominator uses sigma clamped to _safe_eps.
            # NOTE(review): unlike ComplexRBF, sigma is NOT clamped after this
            # update and could become non-positive; later denominators are still
            # protected, but confirm this asymmetry is intended.
            sigma_cu = np.maximum(self.sigma, self._safe_eps)
            self.sigma = self.sigma + (2.0 * self.us) * e_k * phi * self.w * dis_sq / (sigma_cu**3)

            # Center updates, one neuron at a time, using the pre-update sigma.
            denom_c = (sigma_cu**2) + self._safe_eps
            for p in range(self.n_neurons):
                self.vet[p] = self.vet[p] + (2.0 * self.ur) * phi[p] * e_k * self.w[p] * (u - self.vet[p]) / denom_c[p]

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[RBF] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "centers_last": self.vet.copy(),
                "sigma_last": self.sigma.copy(),
                "last_phi": None if last_phi is None else last_phi.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Radial Basis Function (RBF) adaptive model (real-valued).
Implements Algorithm 11.5 (Diniz) with online adaptation of:
- output weights w (length n_neurons),
- centers/centroids (reference vectors) `vet` (shape n_neurons x input_dim),
- spreads `sigma` (length n_neurons).
Model (common form)
For regressor u[k] (shape input_dim,), the i-th basis function is:
phi_i(u[k]) = exp( -||u[k] - c_i||^2 / sigma_i^2 )
Output: y[k] = sum_i w_i * phi_i(u[k])
Input handling
optimize accepts:
1) input_signal as a regressor matrix with shape (N, input_dim), where each row is u[k].
2) input_signal as a 1D signal x[k] with shape (N,). In this case, regressors are built
as tapped-delay vectors of length input_dim:
u[k] = [x[k], x[k-1], ..., x[k-input_dim+1]]
Notes
- Real-valued only: enforced by `ensure_real_signals`.
- The base class coefficient vector `self.w` is used for the neuron output weights. Coefficient history in `OptimizationResult.coefficients` corresponds to w over time.
64 def __init__( 65 self, 66 n_neurons: int, 67 input_dim: int, 68 ur: float = 0.01, 69 uw: float = 0.01, 70 us: float = 0.01, 71 w_init: Optional[ArrayLike] = None, 72 *, 73 sigma_init: float = 1.0, 74 centers_init_scale: float = 0.5, 75 rng: Optional[np.random.Generator] = None, 76 safe_eps: float = 1e-12, 77 ) -> None: 78 """ 79 Parameters 80 ---------- 81 n_neurons: 82 Number of RBF neurons (basis functions). 83 input_dim: 84 Dimension of the regressor u[k]. If input_signal is 1D, this is the tap length. 85 ur: 86 Step-size for center updates. 87 uw: 88 Step-size for output weight updates. 89 us: 90 Step-size for spread (sigma) updates. 91 w_init: 92 Optional initialization for output weights w (length n_neurons). If None, random normal. 93 sigma_init: 94 Initial sigma value for all neurons. 95 centers_init_scale: 96 Scale factor used for random initialization of centers. 97 rng: 98 Optional numpy random generator for reproducible initialization. 99 safe_eps: 100 Small epsilon to protect denominators (sigma^2, sigma^3). 101 """ 102 n_neurons = int(n_neurons) 103 input_dim = int(input_dim) 104 if n_neurons <= 0: 105 raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.") 106 if input_dim <= 0: 107 raise ValueError(f"input_dim must be > 0. Got {input_dim}.") 108 if float(sigma_init) <= 0.0: 109 raise ValueError(f"sigma_init must be > 0. 
Got {sigma_init}.") 110 111 super().__init__(filter_order=n_neurons - 1, w_init=None) 112 113 self.n_neurons = n_neurons 114 self.input_dim = input_dim 115 self.ur = float(ur) 116 self.uw = float(uw) 117 self.us = float(us) 118 119 self._safe_eps = float(safe_eps) 120 self._rng = rng if rng is not None else np.random.default_rng() 121 122 if w_init is None: 123 self.w = self._rng.standard_normal(n_neurons).astype(np.float64) 124 else: 125 w0 = np.asarray(w_init, dtype=np.float64).reshape(-1) 126 if w0.size != n_neurons: 127 raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.") 128 self.w = w0 129 130 self.vet = (float(centers_init_scale) * self._rng.standard_normal((n_neurons, input_dim))).astype( 131 np.float64 132 ) 133 self.sigma = np.ones(n_neurons, dtype=np.float64) * float(sigma_init) 134 135 self.w_history = [] 136 self._record_history()
Parameters
n_neurons: Number of RBF neurons (basis functions). input_dim: Dimension of the regressor u[k]. If input_signal is 1D, this is the tap length. ur: Step-size for center updates. uw: Step-size for output weight updates. us: Step-size for spread (sigma) updates. w_init: Optional initialization for output weights w (length n_neurons). If None, random normal. sigma_init: Initial sigma value for all neurons. centers_init_scale: Scale factor used for random initialization of centers. rng: Optional numpy random generator for reproducible initialization. safe_eps: Small epsilon to protect denominators (sigma^2, sigma^3).
161 @ensure_real_signals 162 def optimize( 163 self, 164 input_signal: Union[np.ndarray, list], 165 desired_signal: Union[np.ndarray, list], 166 verbose: bool = False, 167 return_internal_states: bool = False, 168 ) -> OptimizationResult: 169 """ 170 Run RBF online adaptation. 171 172 Parameters 173 ---------- 174 input_signal: 175 Either: 176 - regressor matrix with shape (N, input_dim), or 177 - 1D signal x[k] with shape (N,) (tapped-delay regressors are built internally). 178 desired_signal: 179 Desired output d[k], shape (N,). 180 verbose: 181 If True, prints runtime. 182 return_internal_states: 183 If True, returns final centers/spreads and selected debug info in `result.extra`. 184 185 Returns 186 ------- 187 OptimizationResult 188 outputs: 189 Estimated output y[k]. 190 errors: 191 A priori error e[k] = d[k] - y[k]. 192 coefficients: 193 History of neuron output weights w (stacked from base history). 194 error_type: 195 "a_priori". 196 197 Extra (when return_internal_states=True) 198 -------------------------------------- 199 extra["centers_last"]: 200 Final centers array `vet` (n_neurons, input_dim). 201 extra["sigma_last"]: 202 Final sigma vector (n_neurons,). 203 extra["last_phi"]: 204 Last basis-function activation vector phi(u[k]) (n_neurons,). 
205 """ 206 t0 = perf_counter() 207 208 x_in = np.asarray(input_signal, dtype=np.float64) 209 d_in = np.asarray(desired_signal, dtype=np.float64).ravel() 210 211 U, N = self._as_regressor_matrix(x_in, input_dim=self.input_dim) 212 if d_in.size != N: 213 raise ValueError(f"Shape mismatch: input({N}) and desired({d_in.size}).") 214 215 outputs = np.zeros(N, dtype=np.float64) 216 errors = np.zeros(N, dtype=np.float64) 217 218 last_phi: Optional[np.ndarray] = None 219 220 for k in range(N): 221 u = U[k, :] 222 223 diff = u[None, :] - self.vet 224 dis_sq = np.sum(diff * diff, axis=1) 225 226 sigma_sq = (self.sigma * self.sigma) + self._safe_eps 227 phi = np.exp(-dis_sq / sigma_sq) 228 last_phi = phi 229 230 y_k = float(np.dot(self.w, phi)) 231 outputs[k] = y_k 232 233 e_k = float(d_in[k] - y_k) 234 errors[k] = e_k 235 236 self.w = self.w + (2.0 * self.uw) * e_k * phi 237 238 sigma_cu = np.maximum(self.sigma, self._safe_eps) 239 self.sigma = self.sigma + (2.0 * self.us) * e_k * phi * self.w * dis_sq / (sigma_cu**3) 240 241 denom_c = (sigma_cu**2) + self._safe_eps 242 for p in range(self.n_neurons): 243 self.vet[p] = self.vet[p] + (2.0 * self.ur) * phi[p] * e_k * self.w[p] * (u - self.vet[p]) / denom_c[p] 244 245 self._record_history() 246 247 runtime_s = float(perf_counter() - t0) 248 if verbose: 249 print(f"[RBF] Completed in {runtime_s * 1000:.03f} ms") 250 251 extra: Optional[Dict[str, Any]] = None 252 if return_internal_states: 253 extra = { 254 "centers_last": self.vet.copy(), 255 "sigma_last": self.sigma.copy(), 256 "last_phi": None if last_phi is None else last_phi.copy(), 257 } 258 259 return self._pack_results( 260 outputs=outputs, 261 errors=errors, 262 runtime_s=runtime_s, 263 error_type="a_priori", 264 extra=extra, 265 )
Run RBF online adaptation.
Parameters
input_signal:
Either:
- regressor matrix with shape (N, input_dim), or
- 1D signal x[k] with shape (N,) (tapped-delay regressors are built internally).
desired_signal:
Desired output d[k], shape (N,).
verbose:
If True, prints runtime.
return_internal_states:
If True, returns final centers/spreads and selected debug info in result.extra.
Returns
OptimizationResult outputs: Estimated output y[k]. errors: A priori error e[k] = d[k] - y[k]. coefficients: History of neuron output weights w (stacked from base history). error_type: "a_priori".
Extra (when return_internal_states=True)
extra["centers_last"]:
Final centers array vet (n_neurons, input_dim).
extra["sigma_last"]:
Final sigma vector (n_neurons,).
extra["last_phi"]:
Last basis-function activation vector phi(u[k]) (n_neurons,).
class VolterraLMS(AdaptiveFilter):
    """
    Volterra LMS (2nd-order) for real-valued adaptive filtering.

    Implements Algorithm 11.1 (Diniz) using a second-order Volterra expansion.

    For a linear memory length L (called `memory`), the regressor is composed of:
        - linear terms: [x[k], x[k-1], ..., x[k-L+1]]
        - quadratic terms (with i <= j):
          [x[k]^2, x[k]x[k-1], ..., x[k-L+1]^2]

    Total number of coefficients:
        n_coeffs = L + L(L+1)/2

    Notes
    -----
    - Real-valued only: enforced by `ensure_real_signals`.
    - The base class coefficient vector `self.w` corresponds to the Volterra
      coefficient vector (linear + quadratic). The history returned in
      `OptimizationResult.coefficients` is the stacked trajectory of `self.w`.
    - `step` can be:
        * scalar (same step for all coefficients), or
        * vector (shape (n_coeffs,)) allowing per-term step scaling.
    """

    supports_complex: bool = False

    def __init__(
        self,
        memory: int = 3,
        step: Union[float, np.ndarray, list] = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        memory:
            Linear memory length L. Determines the Volterra regressor size:
            n_coeffs = L + L(L+1)/2.
        step:
            Step-size mu. Can be a scalar or a vector of length n_coeffs.
        w_init:
            Optional initial coefficients (length n_coeffs). If None, zeros.
        safe_eps:
            Small epsilon used for internal safety checks (kept for consistency).
        """
        memory = int(memory)
        if memory <= 0:
            raise ValueError(f"memory must be > 0. Got {memory}.")

        self.memory: int = memory
        # L linear terms + L(L+1)/2 quadratic terms (i <= j).
        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
        self._safe_eps: float = float(safe_eps)

        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)

        if isinstance(step, (list, np.ndarray)):
            step_vec = np.asarray(step, dtype=np.float64).reshape(-1)
            if step_vec.size != self.n_coeffs:
                raise ValueError(
                    f"step vector must have length {self.n_coeffs}, got {step_vec.size}."
                )
            self.step: Union[float, np.ndarray] = step_vec
        else:
            self.step = float(step)

        self.w = np.asarray(self.w, dtype=np.float64)

        self.w_history = []
        self._record_history()

    def _create_volterra_regressor(self, x_lin: np.ndarray) -> np.ndarray:
        """
        Construct the 2nd-order Volterra regressor from a linear delay line.

        Parameters
        ----------
        x_lin:
            Linear delay line of length `memory` ordered as:
            [x[k], x[k-1], ..., x[k-L+1]].

        Returns
        -------
        np.ndarray
            Volterra regressor u[k] of length n_coeffs:
            [linear terms, quadratic terms (i<=j)].
        """
        x_lin = np.asarray(x_lin, dtype=np.float64).reshape(-1)
        if x_lin.size != self.memory:
            raise ValueError(
                f"x_lin must have length {self.memory}, got {x_lin.size}."
            )

        # Vectorized upper-triangular products x[i]*x[j] for i <= j.
        # np.triu_indices enumerates (i, j) in row-major order, matching the
        # documented coefficient ordering of the original per-pair loop.
        iu, ju = np.triu_indices(self.memory)
        quad = x_lin[iu] * x_lin[ju]

        return np.concatenate([x_lin, quad], axis=0)

    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Volterra LMS adaptation over (x[k], d[k]).

        Parameters
        ----------
        input_signal:
            Input sequence x[k], shape (N,).
        desired_signal:
            Desired sequence d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns selected internal values in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] (a priori).
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of Volterra coefficient vector w (stacked from base history).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["last_regressor"]:
            Last Volterra regressor u[k] (length n_coeffs).
        extra["memory"]:
            Linear memory length L.
        extra["n_coeffs"]:
            Number of Volterra coefficients.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        if x.size != d.size:
            raise ValueError(f"Inconsistent lengths: input({x.size}) != desired({d.size})")
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # Zero-pad L-1 past samples so the first delay lines are defined.
        L = int(self.memory)
        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
        x_padded[L - 1 :] = x

        # Hoist the loop-invariant update gain 2*mu out of the sample loop
        # (self.step never changes during adaptation); works for both the
        # scalar and the per-coefficient vector case.
        two_mu = 2.0 * (self.step if isinstance(self.step, np.ndarray) else float(self.step))

        last_u: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Delay line ordered newest-first: [x[k], x[k-1], ..., x[k-L+1]].
            x_lin = x_padded[k : k + L][::-1]
            u = self._create_volterra_regressor(x_lin)
            last_u = u

            y_k = float(np.dot(self.w, u))
            outputs[k] = y_k

            e_k = float(d[k] - y_k)
            errors[k] = e_k

            # LMS update: w <- w + 2*mu*e[k]*u[k].
            self.w = self.w + two_mu * e_k * u

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[VolterraLMS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_regressor": None if last_u is None else last_u.copy(),
                "memory": int(self.memory),
                "n_coeffs": int(self.n_coeffs),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Volterra LMS (2nd-order) for real-valued adaptive filtering.
Implements Algorithm 11.1 (Diniz) using a second-order Volterra expansion.
For a linear memory length L (called memory), the regressor is composed of:
- linear terms: [x[k], x[k-1], ..., x[k-L+1]]
- quadratic terms (with i <= j): [x[k]^2, x[k]x[k-1], ..., x[k-L+1]^2]
Total number of coefficients: n_coeffs = L + L(L+1)/2
Notes
- Real-valued only: enforced by `ensure_real_signals`.
- The base class coefficient vector `self.w` corresponds to the Volterra coefficient vector (linear + quadratic). The history returned in `OptimizationResult.coefficients` is the stacked trajectory of `self.w`.
- `step` can be:
  - scalar (same step for all coefficients), or
- vector (shape (n_coeffs,)) allowing per-term step scaling.
56 def __init__( 57 self, 58 memory: int = 3, 59 step: Union[float, np.ndarray, list] = 1e-2, 60 w_init: Optional[ArrayLike] = None, 61 *, 62 safe_eps: float = 1e-12, 63 ) -> None: 64 """ 65 Parameters 66 ---------- 67 memory: 68 Linear memory length L. Determines the Volterra regressor size: 69 n_coeffs = L + L(L+1)/2. 70 step: 71 Step-size mu. Can be a scalar or a vector of length n_coeffs. 72 w_init: 73 Optional initial coefficients (length n_coeffs). If None, zeros. 74 safe_eps: 75 Small epsilon used for internal safety checks (kept for consistency). 76 """ 77 memory = int(memory) 78 if memory <= 0: 79 raise ValueError(f"memory must be > 0. Got {memory}.") 80 81 self.memory: int = memory 82 self.n_coeffs: int = memory + (memory * (memory + 1)) // 2 83 self._safe_eps: float = float(safe_eps) 84 85 super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init) 86 87 if isinstance(step, (list, np.ndarray)): 88 step_vec = np.asarray(step, dtype=np.float64).reshape(-1) 89 if step_vec.size != self.n_coeffs: 90 raise ValueError( 91 f"step vector must have length {self.n_coeffs}, got {step_vec.size}." 92 ) 93 self.step: Union[float, np.ndarray] = step_vec 94 else: 95 self.step = float(step) 96 97 self.w = np.asarray(self.w, dtype=np.float64) 98 99 self.w_history = [] 100 self._record_history()
Parameters
memory: Linear memory length L. Determines the Volterra regressor size: n_coeffs = L + L(L+1)/2. step: Step-size mu. Can be a scalar or a vector of length n_coeffs. w_init: Optional initial coefficients (length n_coeffs). If None, zeros. safe_eps: Small epsilon used for internal safety checks (kept for consistency).
    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Volterra LMS adaptation over (x[k], d[k]).

        Parameters
        ----------
        input_signal:
            Input sequence x[k], shape (N,).
        desired_signal:
            Desired sequence d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns selected internal values in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k] (a priori).
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of Volterra coefficient vector w (stacked from base history).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["last_regressor"]:
            Last Volterra regressor u[k] (length n_coeffs).
        extra["memory"]:
            Linear memory length L.
        extra["n_coeffs"]:
            Number of Volterra coefficients.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        if x.size != d.size:
            raise ValueError(f"Inconsistent lengths: input({x.size}) != desired({d.size})")
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # Zero-pad L-1 past samples so the first delay lines are defined.
        L = int(self.memory)
        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
        x_padded[L - 1 :] = x

        last_u: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Delay line ordered newest-first: [x[k], x[k-1], ..., x[k-L+1]].
            x_lin = x_padded[k : k + L][::-1]
            u = self._create_volterra_regressor(x_lin)
            last_u = u

            y_k = float(np.dot(self.w, u))
            outputs[k] = y_k

            e_k = float(d[k] - y_k)
            errors[k] = e_k

            # LMS update: w <- w + 2*mu*e[k]*u[k]; mu may be a scalar or a
            # per-coefficient vector.
            if isinstance(self.step, np.ndarray):
                self.w = self.w + (2.0 * self.step) * e_k * u
            else:
                self.w = self.w + (2.0 * float(self.step)) * e_k * u

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[VolterraLMS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_regressor": None if last_u is None else last_u.copy(),
                "memory": int(self.memory),
                "n_coeffs": int(self.n_coeffs),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Run Volterra LMS adaptation over (x[k], d[k]).
Parameters
input_signal:
Input sequence x[k], shape (N,).
desired_signal:
Desired sequence d[k], shape (N,).
verbose:
If True, prints runtime.
return_internal_states:
If True, returns selected internal values in result.extra.
Returns
OptimizationResult outputs: Filter output y[k] (a priori). errors: A priori error e[k] = d[k] - y[k]. coefficients: History of Volterra coefficient vector w (stacked from base history). error_type: "a_priori".
Extra (when return_internal_states=True)
extra["last_regressor"]: Last Volterra regressor u[k] (length n_coeffs). extra["memory"]: Linear memory length L. extra["n_coeffs"]: Number of Volterra coefficients.
class VolterraRLS(AdaptiveFilter):
    """
    Volterra RLS (2nd-order) for real-valued adaptive filtering.

    Implements Algorithm 11.2 (Diniz) using a second-order Volterra expansion
    and an RLS update on the expanded regressor.

    For linear memory length L (`memory`), the Volterra regressor is:
        - linear terms: [x[k], x[k-1], ..., x[k-L+1]]
        - quadratic terms (i <= j):
          [x[k]^2, x[k]x[k-1], ..., x[k-L+1]^2]

    Total number of coefficients:
        n_coeffs = L + L(L+1)/2

    Notes
    -----
    - Real-valued only (enforced by `ensure_real_signals`).
    - We return the *a priori* error by default:
        e[k] = d[k] - y[k] with y[k] = w^T u[k] (before the weight update)
      and set `error_type="a_priori"`.
    - If `return_internal_states=True`, we also include posterior sequences in `extra`.
    """

    supports_complex: bool = False

    def __init__(
        self,
        memory: int = 3,
        forgetting_factor: float = 0.98,
        delta: float = 1.0,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        memory:
            Linear memory length L. Determines number of Volterra coefficients:
            n_coeffs = L + L(L+1)/2.
        forgetting_factor:
            Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1.
        delta:
            Positive regularization for initializing the inverse correlation matrix:
            P[0] = I / delta.
        w_init:
            Optional initial coefficient vector (length n_coeffs). If None, zeros.
        safe_eps:
            Small epsilon to guard denominators.
        """
        memory = int(memory)
        if memory <= 0:
            raise ValueError(f"memory must be > 0. Got {memory}.")

        lam = float(forgetting_factor)
        if not (0.0 < lam <= 1.0):
            raise ValueError(f"forgetting_factor must satisfy 0 < λ <= 1. Got λ={lam}.")

        delta = float(delta)
        if delta <= 0.0:
            raise ValueError(f"delta must be > 0. Got delta={delta}.")

        self.memory: int = memory
        self.lam: float = lam
        self._safe_eps: float = float(safe_eps)

        # L linear terms + L(L+1)/2 quadratic terms (i <= j).
        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2

        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)

        self.w = np.asarray(self.w, dtype=np.float64)

        # Re-validate w_init against the expanded coefficient count.
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
                )
            self.w = w0.copy()

        # Inverse autocorrelation estimate, initialized to I / delta.
        self.P: np.ndarray = (np.eye(self.n_coeffs, dtype=np.float64) / delta)

        self.w_history = []
        self._record_history()

    def _create_volterra_regressor(self, x_lin: np.ndarray) -> np.ndarray:
        """
        Construct the 2nd-order Volterra regressor from a linear delay line.

        Parameters
        ----------
        x_lin:
            Linear delay line of length `memory` ordered as:
            [x[k], x[k-1], ..., x[k-L+1]].

        Returns
        -------
        np.ndarray
            Volterra regressor u[k] of length n_coeffs:
            [linear terms, quadratic terms (i<=j)].
        """
        x_lin = np.asarray(x_lin, dtype=np.float64).reshape(-1)
        if x_lin.size != self.memory:
            raise ValueError(f"x_lin must have length {self.memory}, got {x_lin.size}.")

        # Upper-triangular products x[i]*x[j] for i <= j, row-major order.
        quad = np.empty((self.memory * (self.memory + 1)) // 2, dtype=np.float64)
        idx = 0
        for i in range(self.memory):
            for j in range(i, self.memory):
                quad[idx] = x_lin[i] * x_lin[j]
                idx += 1

        return np.concatenate([x_lin, quad], axis=0)

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run Volterra RLS adaptation over (x[k], d[k]).

        Parameters
        ----------
        input_signal:
            Input sequence x[k], shape (N,).
        desired_signal:
            Desired sequence d[k], shape (N,).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes additional sequences in `result.extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                A priori output y[k] = w^T u[k].
            errors:
                A priori error e[k] = d[k] - y[k].
            coefficients:
                History of Volterra coefficients w (stacked from base history).
            error_type:
                "a_priori".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["posteriori_outputs"]:
            Output after the weight update (y_post).
        extra["posteriori_errors"]:
            Error after the weight update (e_post).
        extra["last_gain"]:
            Last RLS gain vector k (shape (n_coeffs,)).
        extra["last_den"]:
            Last denominator (scalar).
        extra["last_regressor"]:
            Last Volterra regressor u[k].
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        y_post = np.zeros(n_samples, dtype=np.float64)
        e_post = np.zeros(n_samples, dtype=np.float64)

        # Zero-pad L-1 past samples so the first delay lines are defined.
        L = int(self.memory)
        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
        x_padded[L - 1 :] = x

        last_k: Optional[np.ndarray] = None
        last_den: Optional[float] = None
        last_u: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Newest-first delay line expanded to the Volterra regressor.
            x_lin = x_padded[k : k + L][::-1]
            u = self._create_volterra_regressor(x_lin)
            last_u = u

            # A priori output/error (before the weight update).
            y_k = float(np.dot(self.w, u))
            e_k = float(d[k] - y_k)
            outputs[k] = y_k
            errors[k] = e_k

            # RLS gain: k = P u / (λ + u^T P u), with an epsilon floor on
            # the denominator to avoid division blow-up.
            Pu = self.P @ u
            den = float(self.lam + np.dot(u, Pu))
            if abs(den) < self._safe_eps:
                den = float(den + np.sign(den) * self._safe_eps) if den != 0.0 else float(self._safe_eps)

            k_gain = Pu / den
            last_k = k_gain
            last_den = den

            self.w = self.w + k_gain * e_k

            # Riccati update of the inverse correlation matrix.
            self.P = (self.P - np.outer(k_gain, Pu)) / self.lam

            # A posteriori output/error (after the weight update).
            yk_post = float(np.dot(self.w, u))
            ek_post = float(d[k] - yk_post)
            y_post[k] = yk_post
            e_post[k] = ek_post

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[VolterraRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "posteriori_outputs": y_post,
                "posteriori_errors": e_post,
                "last_gain": None if last_k is None else last_k.copy(),
                "last_den": last_den,
                "last_regressor": None if last_u is None else last_u.copy(),
                "memory": int(self.memory),
                "n_coeffs": int(self.n_coeffs),
                "forgetting_factor": float(self.lam),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Volterra RLS (2nd-order) for real-valued adaptive filtering.
Implements Algorithm 11.2 (Diniz) using a second-order Volterra expansion and an RLS update on the expanded regressor.
For linear memory length L (memory), the Volterra regressor is:
- linear terms: [x[k], x[k-1], ..., x[k-L+1]]
- quadratic terms (i <= j): [x[k]^2, x[k]x[k-1], ..., x[k-L+1]^2]
Total number of coefficients: n_coeffs = L + L(L+1)/2
Notes
- Real-valued only (enforced by `ensure_real_signals`).
- We return the a priori error by default:
  e[k] = d[k] - y[k] with y[k] = w^T u[k] (before the weight update)
  and set `error_type="a_priori"`.
- If `return_internal_states=True`, we also include posterior sequences in `extra`.
55 def __init__( 56 self, 57 memory: int = 3, 58 forgetting_factor: float = 0.98, 59 delta: float = 1.0, 60 w_init: Optional[ArrayLike] = None, 61 *, 62 safe_eps: float = 1e-12, 63 ) -> None: 64 """ 65 Parameters 66 ---------- 67 memory: 68 Linear memory length L. Determines number of Volterra coefficients: 69 n_coeffs = L + L(L+1)/2. 70 forgetting_factor: 71 Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1. 72 delta: 73 Positive regularization for initializing the inverse correlation matrix: 74 P[0] = I / delta. 75 w_init: 76 Optional initial coefficient vector (length n_coeffs). If None, zeros. 77 safe_eps: 78 Small epsilon to guard denominators. 79 """ 80 memory = int(memory) 81 if memory <= 0: 82 raise ValueError(f"memory must be > 0. Got {memory}.") 83 84 lam = float(forgetting_factor) 85 if not (0.0 < lam <= 1.0): 86 raise ValueError(f"forgetting_factor must satisfy 0 < λ <= 1. Got λ={lam}.") 87 88 delta = float(delta) 89 if delta <= 0.0: 90 raise ValueError(f"delta must be > 0. Got delta={delta}.") 91 92 self.memory: int = memory 93 self.lam: float = lam 94 self._safe_eps: float = float(safe_eps) 95 96 self.n_coeffs: int = memory + (memory * (memory + 1)) // 2 97 98 super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init) 99 100 self.w = np.asarray(self.w, dtype=np.float64) 101 102 if w_init is not None: 103 w0 = np.asarray(w_init, dtype=np.float64).reshape(-1) 104 if w0.size != self.n_coeffs: 105 raise ValueError( 106 f"w_init must have length {self.n_coeffs}, got {w0.size}." 107 ) 108 self.w = w0.copy() 109 110 self.P: np.ndarray = (np.eye(self.n_coeffs, dtype=np.float64) / delta) 111 112 self.w_history = [] 113 self._record_history()
Parameters
memory: Linear memory length L. Determines number of Volterra coefficients: n_coeffs = L + L(L+1)/2. forgetting_factor: Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1. delta: Positive regularization for initializing the inverse correlation matrix: P[0] = I / delta. w_init: Optional initial coefficient vector (length n_coeffs). If None, zeros. safe_eps: Small epsilon to guard denominators.
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Adapt the Volterra RLS filter over the sample pairs (x[k], d[k]).

    Parameters
    ----------
    input_signal:
        Input sequence x[k], shape (N,).
    desired_signal:
        Desired sequence d[k], shape (N,).
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, includes additional sequences in `result.extra`.

    Returns
    -------
    OptimizationResult
        outputs:
            A priori output y[k] = w^T u[k].
        errors:
            A priori error e[k] = d[k] - y[k].
        coefficients:
            History of Volterra coefficients w (stacked from base history).
        error_type:
            "a_priori".

    Extra (when return_internal_states=True)
    --------------------------------------
    extra["posteriori_outputs"]:
        Output after the weight update (y_post).
    extra["posteriori_errors"]:
        Error after the weight update (e_post).
    extra["last_gain"]:
        Last RLS gain vector k (shape (n_coeffs,)).
    extra["last_den"]:
        Last denominator (scalar).
    extra["last_regressor"]:
        Last Volterra regressor u[k].
    """
    started = perf_counter()

    x_seq = np.asarray(input_signal, dtype=np.float64).ravel()
    d_seq = np.asarray(desired_signal, dtype=np.float64).ravel()
    total = int(x_seq.size)

    # A priori and a posteriori trajectories.
    priori_out = np.zeros(total, dtype=np.float64)
    priori_err = np.zeros(total, dtype=np.float64)
    post_out = np.zeros(total, dtype=np.float64)
    post_err = np.zeros(total, dtype=np.float64)

    # Zero-prefix the input so the first window is fully defined.
    mem = int(self.memory)
    padded = np.zeros(total + (mem - 1), dtype=np.float64)
    padded[mem - 1 :] = x_seq

    gain_last: Optional[np.ndarray] = None
    den_last: Optional[float] = None
    reg_last: Optional[np.ndarray] = None

    for idx in range(total):
        # Most-recent-first linear window feeding the Volterra expansion.
        window = padded[idx : idx + mem][::-1]
        phi = self._create_volterra_regressor(window)
        reg_last = phi

        y_pri = float(np.dot(self.w, phi))
        e_pri = float(d_seq[idx] - y_pri)
        priori_out[idx] = y_pri
        priori_err[idx] = e_pri

        p_phi = self.P @ phi
        denom = float(self.lam + np.dot(phi, p_phi))
        if abs(denom) < self._safe_eps:
            # Guard against a (near-)zero normalization term while keeping its sign.
            if denom == 0.0:
                denom = float(self._safe_eps)
            else:
                denom = float(denom + np.sign(denom) * self._safe_eps)

        gain = p_phi / denom
        gain_last = gain
        den_last = denom

        # Standard RLS coefficient and inverse-correlation updates.
        self.w = self.w + gain * e_pri
        self.P = (self.P - np.outer(gain, p_phi)) / self.lam

        y_pos = float(np.dot(self.w, phi))
        post_out[idx] = y_pos
        post_err[idx] = float(d_seq[idx] - y_pos)

        self._record_history()

    runtime_s = float(perf_counter() - started)
    if verbose:
        print(f"[VolterraRLS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "posteriori_outputs": post_out,
            "posteriori_errors": post_err,
            "last_gain": None if gain_last is None else gain_last.copy(),
            "last_den": den_last,
            "last_regressor": None if reg_last is None else reg_last.copy(),
            "memory": int(self.memory),
            "n_coeffs": int(self.n_coeffs),
            "forgetting_factor": float(self.lam),
        }

    return self._pack_results(
        outputs=priori_out,
        errors=priori_err,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Run Volterra RLS adaptation over (x[k], d[k]).
Parameters
input_signal:
Input sequence x[k], shape (N,).
desired_signal:
Desired sequence d[k], shape (N,).
verbose:
If True, prints runtime.
return_internal_states:
If True, includes additional sequences in result.extra.
Returns
OptimizationResult — outputs: a priori output y[k] = w^T u[k]; errors: a priori error e[k] = d[k] - y[k]; coefficients: history of Volterra coefficients w (stacked from the base history); error_type: "a_priori".
Extra (when return_internal_states=True)
extra["posteriori_outputs"]: Output after the weight update (y_post). extra["posteriori_errors"]: Error after the weight update (e_post). extra["last_gain"]: Last RLS gain vector k (shape (n_coeffs,)). extra["last_den"]: Last denominator (scalar). extra["last_regressor"]: Last Volterra regressor u[k].
class CFDLMS(AdaptiveFilter):
    """
    Implements the Constrained Frequency-Domain LMS (CFDLMS) algorithm for real-valued data.
    (Algorithm 12.4, Diniz)

    Notes
    -----
    - This algorithm is block-based: each iteration produces L time-domain outputs.
    - Internally it uses complex FFT processing; outputs/errors returned are real.
    - Coefficients are a subband matrix ww with shape (M, Nw+1).
    - For compatibility with the base class, `self.w` stores a flattened view of `ww`.
      The returned OptimizationResult.coefficients still comes from `self.w_history`
      (flattened), and the full matrix history is provided in `extra["ww_history"]`.
    """
    supports_complex: bool = False

    # M: number of subbands (FFT size); L: decimation/block length;
    # Nw: per-bin filter order; step/gamma/smoothing: adaptation constants.
    M: int
    L: int
    Nw: int
    step: float
    gamma: float
    smoothing: float

    def __init__(
        self,
        filter_order: int = 5,
        n_subbands: int = 64,
        decimation: Optional[int] = None,
        step: float = 0.1,
        gamma: float = 1e-2,
        smoothing: float = 0.01,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Configure the CFDLMS filter.

        Parameters
        ----------
        filter_order:
            Per-bin filter order Nw (>= 0); each bin has Nw+1 taps.
        n_subbands:
            Number of subbands M (> 0), also the FFT length.
        decimation:
            Block length L with 1 <= L <= M. Defaults to M // 2.
        step:
            Adaptation step size.
        gamma:
            Regularization added to the per-bin power (> 0).
        smoothing:
            Exponential smoothing factor for per-bin power, in (0, 1].
        w_init:
            Optional initial coefficients: either shape (M, Nw+1) or a
            flat vector of length M*(Nw+1).

        Raises
        ------
        ValueError
            On any out-of-range parameter or incompatible `w_init`.
        """
        if n_subbands <= 0:
            raise ValueError("n_subbands (M) must be a positive integer.")
        if filter_order < 0:
            raise ValueError("filter_order (Nw) must be >= 0.")
        if decimation is None:
            decimation = n_subbands // 2
        if decimation <= 0 or decimation > n_subbands:
            raise ValueError("decimation (L) must satisfy 1 <= L <= M.")
        if gamma <= 0:
            raise ValueError("gamma must be > 0.")
        if not (0.0 < smoothing <= 1.0):
            raise ValueError("smoothing must be in (0, 1].")

        self.M = int(n_subbands)
        self.L = int(decimation)
        self.Nw = int(filter_order)

        self.step = float(step)
        self.gamma = float(gamma)
        self.smoothing = float(smoothing)

        # The base class sees the flattened parameter count.
        n_params = self.M * (self.Nw + 1)
        super().__init__(filter_order=n_params - 1, w_init=None)

        self.ww: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        if w_init is not None:
            w0 = np.asarray(w_init)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.ww = w0.astype(np.complex128, copy=True)
            else:
                w0 = w0.reshape(-1)
                if w0.size != n_params:
                    raise ValueError(
                        f"w_init has incompatible size. Expected {n_params} "
                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
                    )
                self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)

        # uu: per-bin tapped delay line of FFT-domain inputs; sig: smoothed per-bin power.
        self.uu: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        self.sig: np.ndarray = np.zeros(self.M, dtype=np.float64)

        # NOTE(review): complex->float cast discards the imaginary part (numpy
        # emits ComplexWarning for nonzero imaginary); ww is all-zeros here, but
        # confirm intent for a complex w_init.
        self.w = self.ww.reshape(-1).astype(float, copy=False)
        self.w_history = []
        self._record_history()

        self.ww_history: list[np.ndarray] = []

    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
        """
        Reset coefficients/history.

        If w_new is:
            - None: zeros
            - shape (M, Nw+1): used directly
            - flat of length M*(Nw+1): reshaped

        Raises
        ------
        ValueError
            If `w_new` has an incompatible size.
        """
        n_params = self.M * (self.Nw + 1)

        if w_new is None:
            self.ww = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        else:
            w0 = np.asarray(w_new)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.ww = w0.astype(np.complex128, copy=True)
            else:
                w0 = w0.reshape(-1)
                if w0.size != n_params:
                    raise ValueError(
                        f"w_new has incompatible size. Expected {n_params} "
                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
                    )
                self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)

        # Clear internal FFT-domain delay line and smoothed power.
        self.uu = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        self.sig = np.zeros(self.M, dtype=np.float64)

        self.ww_history = []
        self.w = self.ww.reshape(-1).astype(float, copy=False)
        self.w_history = []
        self._record_history()

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the CFDLMS weight update process.

        Parameters
        ----------
        input_signal:
            Input signal x[n] (real-valued).
        desired_signal:
            Desired signal d[n] (real-valued).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns additional internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated output signal (real), length = n_iters * L.
            errors:
                Output error signal (real), same length as outputs.
            coefficients:
                Flattened coefficient history (from base `w_history`).
            error_type:
                "output_error".

        Extra (always)
        -------------
        extra["ww_history"]:
            List of coefficient matrices ww over iterations; each entry has shape (M, Nw+1).
        extra["n_iters"]:
            Number of block iterations.

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["sig"]:
            Final smoothed energy per bin (M,).
        extra["sig_history"]:
            Energy history per iteration (n_iters, M).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        M = self.M
        L = self.L
        Nw = self.Nw

        # Number of complete blocks available from x (accounting for the
        # L-sample zero prefix) and from d; process only whole blocks.
        max_iters_from_x = int(np.floor((x.size + L - M) / L) + 1) if (x.size + L) >= M else 0
        max_iters_from_d = int(d.size // L)
        n_iters = max(0, min(max_iters_from_x, max_iters_from_d))

        out_len = n_iters * L
        outputs = np.zeros(out_len, dtype=np.float64)
        errors = np.zeros(out_len, dtype=np.float64)

        # Zero prefix so the first M-sample window is fully defined.
        xpad = np.concatenate([np.zeros(L, dtype=np.float64), x])

        self.ww_history = []

        sig_hist: Optional[np.ndarray] = np.zeros((n_iters, M), dtype=np.float64) if return_internal_states else None

        # Work on local aliases; persisted back to self after the loop.
        uu = self.uu
        ww = self.ww
        sig = self.sig

        a = self.smoothing
        u_step = self.step
        gamma = self.gamma
        sqrtM = np.sqrt(M)

        for k in range(n_iters):
            start = k * L
            seg_x = xpad[start : start + M]

            # Most-recent-first ordering of the input window.
            x_p = seg_x[::-1].astype(np.complex128, copy=False)

            d_seg = d[start : start + L]
            d_p = d_seg[::-1].astype(np.complex128, copy=False)

            # Normalized FFT analysis of the current window.
            ui = np.fft.fft(x_p) / sqrtM

            # Shift the per-bin tapped delay line and insert the new bins.
            uu[:, 1:] = uu[:, :-1]
            uu[:, 0] = ui

            # Per-bin filtering across the Nw+1 taps.
            uy = np.sum(uu * ww, axis=1)

            # Back to time domain; only the first L samples form the block output.
            y_block = np.fft.ifft(uy) * sqrtM
            y_firstL = y_block[:L]

            # Error in the reversed-time ordering (used for the update) ...
            e_rev = d_p - y_firstL

            # ... and in natural ordering (returned to the caller).
            y_time = np.real(y_firstL[::-1])
            e_time = d_seg - y_time

            outputs[start : start + L] = y_time
            errors[start : start + L] = e_time

            # Zero-pad the error to length M and transform it.
            e_pad = np.concatenate([e_rev, np.zeros(M - L, dtype=np.complex128)])
            et = np.fft.fft(e_pad) / sqrtM
            # Exponentially smoothed per-bin input power.
            sig[:] = (1.0 - a) * sig + a * (np.abs(ui) ** 2)

            # Power-normalized step per bin (gamma regularizes small powers).
            denom = gamma + (Nw + 1) * sig
            gain = u_step / denom

            # Unconstrained gradient correction per bin/tap.
            wwc = (gain[:, None] * np.conj(uu) * et[:, None]).astype(np.complex128, copy=False)

            # Constraint projection: transform across bins, zero rows L..M-1,
            # transform back (the "constrained" part of CFDLMS, Alg. 12.4).
            waux = np.fft.fft(wwc, axis=0) / sqrtM
            waux[L:, :] = 0.0
            wwc_c = np.fft.ifft(waux, axis=0) * sqrtM

            ww = ww + wwc_c

            self.ww_history.append(ww.copy())

            # Flattened real view for base-class compatibility.
            self.w = np.real(ww.reshape(-1)).astype(float, copy=False)
            self._record_history()

            if return_internal_states and sig_hist is not None:
                sig_hist[k, :] = sig

        self.uu = uu
        self.ww = ww
        self.sig = sig

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[CFDLMS] Completed in {runtime_s * 1000:.03f} ms | iters={n_iters} | out_len={out_len}")

        extra: Dict[str, Any] = {
            "ww_history": self.ww_history,
            "n_iters": int(n_iters),
        }
        if return_internal_states:
            extra["sig"] = sig.copy()
            extra["sig_history"] = sig_hist

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Implements the Constrained Frequency-Domain LMS (CFDLMS) algorithm for real-valued data. (Algorithm 12.4, Diniz)
Notes
- This algorithm is block-based: each iteration produces L time-domain outputs.
- Internally it uses complex FFT processing; outputs/errors returned are real.
- Coefficients are a subband matrix ww with shape (M, Nw+1).
- For compatibility with the base class, `self.w` stores a flattened view of `ww`. The returned `OptimizationResult.coefficients` still comes from `self.w_history` (flattened), and the full matrix history is provided in `extra["ww_history"]`.
50 def __init__( 51 self, 52 filter_order: int = 5, 53 n_subbands: int = 64, 54 decimation: Optional[int] = None, 55 step: float = 0.1, 56 gamma: float = 1e-2, 57 smoothing: float = 0.01, 58 w_init: Optional[Union[np.ndarray, list]] = None, 59 ) -> None: 60 if n_subbands <= 0: 61 raise ValueError("n_subbands (M) must be a positive integer.") 62 if filter_order < 0: 63 raise ValueError("filter_order (Nw) must be >= 0.") 64 if decimation is None: 65 decimation = n_subbands // 2 66 if decimation <= 0 or decimation > n_subbands: 67 raise ValueError("decimation (L) must satisfy 1 <= L <= M.") 68 if gamma <= 0: 69 raise ValueError("gamma must be > 0.") 70 if not (0.0 < smoothing <= 1.0): 71 raise ValueError("smoothing must be in (0, 1].") 72 73 self.M = int(n_subbands) 74 self.L = int(decimation) 75 self.Nw = int(filter_order) 76 77 self.step = float(step) 78 self.gamma = float(gamma) 79 self.smoothing = float(smoothing) 80 81 n_params = self.M * (self.Nw + 1) 82 super().__init__(filter_order=n_params - 1, w_init=None) 83 84 self.ww: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 85 if w_init is not None: 86 w0 = np.asarray(w_init) 87 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 88 self.ww = w0.astype(np.complex128, copy=True) 89 else: 90 w0 = w0.reshape(-1) 91 if w0.size != n_params: 92 raise ValueError( 93 f"w_init has incompatible size. Expected {n_params} " 94 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 95 ) 96 self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True) 97 98 self.uu: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 99 self.sig: np.ndarray = np.zeros(self.M, dtype=np.float64) 100 101 self.w = self.ww.reshape(-1).astype(float, copy=False) 102 self.w_history = [] 103 self._record_history() 104 105 self.ww_history: list[np.ndarray] = []
107 def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None: 108 """ 109 Reset coefficients/history. 110 111 If w_new is: 112 - None: zeros 113 - shape (M, Nw+1): used directly 114 - flat of length M*(Nw+1): reshaped 115 """ 116 n_params = self.M * (self.Nw + 1) 117 118 if w_new is None: 119 self.ww = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 120 else: 121 w0 = np.asarray(w_new) 122 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 123 self.ww = w0.astype(np.complex128, copy=True) 124 else: 125 w0 = w0.reshape(-1) 126 if w0.size != n_params: 127 raise ValueError( 128 f"w_new has incompatible size. Expected {n_params} " 129 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 130 ) 131 self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True) 132 133 self.uu = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 134 self.sig = np.zeros(self.M, dtype=np.float64) 135 136 self.ww_history = [] 137 self.w = self.ww.reshape(-1).astype(float, copy=False) 138 self.w_history = [] 139 self._record_history()
Reset coefficients/history.
If w_new is:
- None: zeros
- shape (M, Nw+1): used directly
- flat of length M*(Nw+1): reshaped
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the CFDLMS weight update process.

    Parameters
    ----------
    input_signal:
        Input signal x[n] (real-valued).
    desired_signal:
        Desired signal d[n] (real-valued).
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns additional internal trajectories in result.extra.

    Returns
    -------
    OptimizationResult
        outputs:
            Estimated output signal (real), length = n_iters * L.
        errors:
            Output error signal (real), same length as outputs.
        coefficients:
            Flattened coefficient history (from base `w_history`).
        error_type:
            "output_error".

    Extra (always)
    -------------
    extra["ww_history"]:
        List of coefficient matrices ww over iterations; each entry has shape (M, Nw+1).
    extra["n_iters"]:
        Number of block iterations.

    Extra (when return_internal_states=True)
    --------------------------------------
    extra["sig"]:
        Final smoothed energy per bin (M,).
    extra["sig_history"]:
        Energy history per iteration (n_iters, M).
    """
    tic: float = time()

    x = np.asarray(input_signal, dtype=np.float64).ravel()
    d = np.asarray(desired_signal, dtype=np.float64).ravel()

    M = self.M
    L = self.L
    Nw = self.Nw

    # Whole blocks available from x (with the L-sample zero prefix) and d.
    max_iters_from_x = int(np.floor((x.size + L - M) / L) + 1) if (x.size + L) >= M else 0
    max_iters_from_d = int(d.size // L)
    n_iters = max(0, min(max_iters_from_x, max_iters_from_d))

    out_len = n_iters * L
    outputs = np.zeros(out_len, dtype=np.float64)
    errors = np.zeros(out_len, dtype=np.float64)

    # Zero prefix so the first M-sample window is fully defined.
    xpad = np.concatenate([np.zeros(L, dtype=np.float64), x])

    self.ww_history = []

    sig_hist: Optional[np.ndarray] = np.zeros((n_iters, M), dtype=np.float64) if return_internal_states else None

    # Local aliases; written back to self after the loop.
    uu = self.uu
    ww = self.ww
    sig = self.sig

    a = self.smoothing
    u_step = self.step
    gamma = self.gamma
    sqrtM = np.sqrt(M)

    for k in range(n_iters):
        start = k * L
        seg_x = xpad[start : start + M]

        # Most-recent-first ordering of the input window.
        x_p = seg_x[::-1].astype(np.complex128, copy=False)

        d_seg = d[start : start + L]
        d_p = d_seg[::-1].astype(np.complex128, copy=False)

        # Normalized FFT analysis of the current window.
        ui = np.fft.fft(x_p) / sqrtM

        # Shift the per-bin tapped delay line and insert the new bins.
        uu[:, 1:] = uu[:, :-1]
        uu[:, 0] = ui

        # Per-bin filtering across the Nw+1 taps.
        uy = np.sum(uu * ww, axis=1)

        # Back to time domain; the first L samples are the block output.
        y_block = np.fft.ifft(uy) * sqrtM
        y_firstL = y_block[:L]

        # Error in reversed-time ordering (drives the update) ...
        e_rev = d_p - y_firstL

        # ... and in natural ordering (returned to the caller).
        y_time = np.real(y_firstL[::-1])
        e_time = d_seg - y_time

        outputs[start : start + L] = y_time
        errors[start : start + L] = e_time

        # Zero-pad the error to length M and transform it.
        e_pad = np.concatenate([e_rev, np.zeros(M - L, dtype=np.complex128)])
        et = np.fft.fft(e_pad) / sqrtM
        # Exponentially smoothed per-bin input power.
        sig[:] = (1.0 - a) * sig + a * (np.abs(ui) ** 2)

        # Power-normalized step per bin (gamma regularizes small powers).
        denom = gamma + (Nw + 1) * sig
        gain = u_step / denom

        # Unconstrained gradient correction per bin/tap.
        wwc = (gain[:, None] * np.conj(uu) * et[:, None]).astype(np.complex128, copy=False)

        # Constraint projection: transform across bins, zero rows L..M-1,
        # transform back (the "constrained" part of CFDLMS, Alg. 12.4).
        waux = np.fft.fft(wwc, axis=0) / sqrtM
        waux[L:, :] = 0.0
        wwc_c = np.fft.ifft(waux, axis=0) * sqrtM

        ww = ww + wwc_c

        self.ww_history.append(ww.copy())

        # Flattened real view for base-class compatibility.
        self.w = np.real(ww.reshape(-1)).astype(float, copy=False)
        self._record_history()

        if return_internal_states and sig_hist is not None:
            sig_hist[k, :] = sig

    self.uu = uu
    self.ww = ww
    self.sig = sig

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[CFDLMS] Completed in {runtime_s * 1000:.03f} ms | iters={n_iters} | out_len={out_len}")

    extra: Dict[str, Any] = {
        "ww_history": self.ww_history,
        "n_iters": int(n_iters),
    }
    if return_internal_states:
        extra["sig"] = sig.copy()
        extra["sig_history"] = sig_hist

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Executes the CFDLMS weight update process.
Parameters
input_signal: Input signal x[n] (real-valued). desired_signal: Desired signal d[n] (real-valued). verbose: If True, prints runtime. return_internal_states: If True, returns additional internal trajectories in result.extra.
Returns
OptimizationResult
outputs:
Estimated output signal (real), length = n_iters * L.
errors:
Output error signal (real), same length as outputs.
coefficients:
Flattened coefficient history (from base w_history).
error_type:
"output_error".
Extra (always)
extra["ww_history"]: List of coefficient matrices ww over iterations; each entry has shape (M, Nw+1). extra["n_iters"]: Number of block iterations.
Extra (when return_internal_states=True)
extra["sig"]: Final smoothed energy per bin (M,). extra["sig_history"]: Energy history per iteration (n_iters, M).
class DLCLLMS(AdaptiveFilter):
    """
    Implements the Delayless Closed-Loop Subband LMS adaptive-filtering algorithm (DLCLLMS)
    for real-valued fullband data. (Algorithm 12.3, Diniz)

    Notes
    -----
    - Processing is block-based with block length L = M (number of subbands).
    - Internally uses complex subband signals (DFT analysis bank).
    - The mapped equivalent fullband FIR GG (length M*Nw) is exposed via `self.w` (float).
    - For compatibility with the base class, `OptimizationResult.coefficients` returns
      `self.w_history` which stores the mapped GG **once per processed block**.
    """
    supports_complex: bool = False

    def __init__(
        self,
        filter_order: int = 5,
        n_subbands: int = 4,
        step: float = 0.1,
        gamma: float = 1e-2,
        a: float = 1e-2,
        nyquist_len: int = 2,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Configure the DLCLLMS filter.

        Parameters
        ----------
        filter_order:
            Subband filter order Nw (> 0); each subband has Nw+1 taps.
        n_subbands:
            Number of subbands M (> 0); also the block length.
        step:
            LMS step size.
        gamma:
            Regularization added to the smoothed subband power (used in
            the normalized step).
        a:
            Smoothing factor for the subband power estimate.
        nyquist_len:
            Design parameter for the polyphase Nyquist analysis bank (> 0).
        w_init:
            Optional initial subband coefficients: shape (M, Nw+1) or a
            flat vector of length M*(Nw+1).

        Raises
        ------
        ValueError
            On any out-of-range parameter or incompatible `w_init`.
        """
        self.M: int = int(n_subbands)
        if self.M <= 0:
            raise ValueError("n_subbands must be a positive integer.")

        self.Nw: int = int(filter_order)
        if self.Nw <= 0:
            raise ValueError("filter_order must be a positive integer.")

        self.step: float = float(step)
        self.gamma: float = float(gamma)
        self.a: float = float(a)

        self.nyquist_len: int = int(nyquist_len)
        if self.nyquist_len <= 0:
            raise ValueError("nyquist_len must be a positive integer.")

        # Length of the mapped equivalent fullband FIR.
        self._full_len: int = int(self.M * self.Nw)

        super().__init__(filter_order=self._full_len - 1, w_init=None)

        # Polyphase Nyquist analysis bank and its (integer) group delay.
        self.Ed: np.ndarray = _design_polyphase_nyquist_bank(self.M, self.nyquist_len)
        self._P: int = int(self.Ed.shape[1])
        self._Dint: int = int((self._P - 1) // 2)

        self.F: np.ndarray = _dft_matrix(self.M)

        # Subband coefficient matrix (adapted in optimize()).
        self.w_sb: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)
        if w_init is not None:
            w0 = np.asarray(w_init)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.w_sb = w0.astype(complex, copy=True)
            else:
                w0 = w0.reshape(-1)
                if w0.size != self.M * (self.Nw + 1):
                    raise ValueError(
                        f"w_init has incompatible size. Expected {self.M*(self.Nw+1)} "
                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
                    )
                self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)

        # x_cl: per-subband regressor delay line; sig: smoothed subband power.
        self.x_cl: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)

        self.sig: np.ndarray = np.zeros((self.M,), dtype=float)

        # Fractional-delay (polyphase) states for the input and error paths.
        self._xx_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
        self._ee_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)

        # Fullband FIR state for block-by-block filtering.
        self._x_state: np.ndarray = np.zeros((max(self._full_len - 1, 0),), dtype=float)

        self.w_history = []
        self._record_history()

    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
        """
        Reset coefficients and history.

        - If w_new is provided:
            * If shape (M, Nw+1): interpreted as subband coefficients.
            * If flat of length M*(Nw+1): reshaped as subband coefficients.
        - Resets internal states (x_cl, sig, fractional-delay, FIR state).

        Raises
        ------
        ValueError
            If `w_new` has an incompatible size.
        """
        if w_new is None:
            self.w_sb = np.zeros((self.M, self.Nw + 1), dtype=complex)
        else:
            w0 = np.asarray(w_new)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.w_sb = w0.astype(complex, copy=True)
            else:
                w0 = w0.reshape(-1)
                if w0.size != self.M * (self.Nw + 1):
                    raise ValueError(
                        f"w_new has incompatible size. Expected {self.M*(self.Nw+1)} "
                        f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
                    )
                self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)

        self.x_cl = np.zeros((self.M, self.Nw + 1), dtype=complex)
        self.sig = np.zeros((self.M,), dtype=float)
        self._xx_frac = np.zeros((self._P, self.M), dtype=float)
        self._ee_frac = np.zeros((self._P, self.M), dtype=float)
        self._x_state = np.zeros((max(self._full_len - 1, 0),), dtype=float)

        # Expose the mapped fullband FIR through self.w and restart history.
        GG = self._equivalent_fullband()
        self.w = GG.astype(float, copy=True)
        self.w_history = []
        self._record_history()

    def _equivalent_fullband(self) -> np.ndarray:
        """
        Build the equivalent fullband FIR GG (length M*Nw) from current subband coefficients,
        matching the MATLAB mapping.

        Returns
        -------
        GG : np.ndarray, shape (M*Nw,), dtype=float
        """
        # Inverse-DFT the subband coefficients back to branch impulse responses.
        ww = np.real(self.F.conj().T @ self.w_sb) / float(self.M)

        G = np.zeros((self.M, self.Nw), dtype=float)
        # Branch 0 maps directly (no fractional-delay compensation).
        G[0, :] = ww[0, : self.Nw]

        for m in range(1, self.M):
            # Compensate branch m by the (m-1)-th fractional-delay filter,
            # then keep the Nw samples after the integer group delay.
            aux = np.convolve(self.Ed[m - 1, :], ww[m, :], mode="full")
            start = self._Dint + 1
            stop = start + self.Nw
            G[m, :] = aux[start:stop]

        # Column-major interleaving reproduces the MATLAB GG(:) layout.
        GG = G.reshape(-1, order="F")
        return GG

    def _fir_block(self, b: np.ndarray, x_block: np.ndarray) -> np.ndarray:
        """
        FIR filtering with state, matching MATLAB `filter(b,1,x,zi)` block-by-block.

        Parameters
        ----------
        b : np.ndarray
            FIR coefficients (length Lb).
        x_block : np.ndarray
            Input block to filter.

        Returns
        -------
        np.ndarray
            Filtered block, same length as x_block. Updates `self._x_state`
            in place so consecutive calls are seamless.
        """
        Lb = int(b.size)
        if Lb == 0:
            return np.zeros_like(x_block, dtype=float)
        if Lb == 1:
            # Pure gain: no state needed.
            return float(b[0]) * x_block

        y = np.zeros_like(x_block, dtype=float)
        state = self._x_state

        for i, x_n in enumerate(x_block):
            # Direct-form: current tap plus the dot product with past samples.
            acc = float(b[0]) * float(x_n)
            if Lb > 1 and state.size > 0:
                acc += float(np.dot(b[1:], state[: Lb - 1]))
            y[i] = acc

            # Shift the delay line and push the newest sample.
            if state.size > 0:
                state[1:] = state[:-1]
                state[0] = float(x_n)

        self._x_state = state
        return y

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the adaptation process for the DLCLLMS algorithm (faithful to dlcllms.m).

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated fullband output y[n] (real), same length as input_signal.
            errors:
                Fullband error e[n] = d[n] - y[n] (real).
            coefficients:
                History of equivalent fullband FIR vectors GG (length M*Nw),
                stored once per processed block (plus the initial entry).

        Extra (always)
        -------------
        extra["n_blocks"]:
            Number of processed blocks.
        extra["block_len"]:
            Block length (equals M).
        extra["n_used"]:
            Number of samples actually processed (multiple of M).

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["sig_history"]:
            Smoothed subband power per block (n_blocks, M).
        extra["w_sb_final"]:
            Final subband coefficient matrix (M, Nw+1), complex.
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=float).ravel()
        d = np.asarray(desired_signal, dtype=float).ravel()

        n_samples: int = int(x.size)
        M: int = int(self.M)
        L: int = M

        n_blocks: int = int(n_samples // L)
        n_used: int = int(n_blocks * L)

        outputs = np.zeros((n_samples,), dtype=float)
        errors = np.zeros((n_samples,), dtype=float)

        sig_hist: Optional[np.ndarray] = np.zeros((n_blocks, M), dtype=float) if return_internal_states else None

        # Restart history with the initial (pre-adaptation) entry.
        self.w_history = []
        self._record_history()

        if n_blocks == 0:
            # Too few samples for one block: output stays zero, error equals d.
            errors = d - outputs
            runtime_s: float = float(time() - tic)
            extra: Dict[str, Any] = {"n_blocks": 0, "block_len": L, "n_used": 0}
            return self._pack_results(
                outputs=outputs,
                errors=errors,
                runtime_s=runtime_s,
                error_type="output_error",
                extra=extra,
            )

        for k in range(n_blocks):
            i0 = k * L
            i1 = i0 + L

            x_block = x[i0:i1]
            d_block = d[i0:i1]

            # Most-recent-first ordering for the analysis bank.
            x_p = x_block[::-1]

            # Fractional-delay filtering of the input per polyphase branch.
            x_frac = np.zeros((M,), dtype=float)
            for m in range(M):
                self._xx_frac[1:, m] = self._xx_frac[:-1, m]
                self._xx_frac[0, m] = x_p[m]
                x_frac[m] = float(np.dot(self.Ed[m, :], self._xx_frac[:, m]))

            # DFT analysis: complex subband input samples.
            xsb = self.F @ x_frac.astype(complex)

            # Delayless output: filter the fullband block with the mapped FIR.
            GG = self._equivalent_fullband()
            y_block = self._fir_block(GG, x_block)

            outputs[i0:i1] = y_block
            e_block = d_block - y_block
            errors[i0:i1] = e_block

            # Record the mapped fullband coefficients once per block.
            self.w = GG.astype(float, copy=True)
            self._record_history()

            # Closed loop: analyze the fullband error into the subbands.
            e_p = e_block[::-1]
            e_frac = np.zeros((M,), dtype=float)
            for m in range(M):
                self._ee_frac[1:, m] = self._ee_frac[:-1, m]
                self._ee_frac[0, m] = e_p[m]
                e_frac[m] = float(np.dot(self.Ed[m, :], self._ee_frac[:, m]))

            esb = self.F @ e_frac.astype(complex)

            for m in range(M):
                # Shift the subband regressor and insert the new sample.
                self.x_cl[m, 1:] = self.x_cl[m, :-1]
                self.x_cl[m, 0] = xsb[m]

                # Exponentially smoothed subband power.
                self.sig[m] = (1.0 - self.a) * self.sig[m] + self.a * (np.abs(xsb[m]) ** 2)

                # Power-normalized step (gamma regularizes small powers).
                mu_n = self.step / (self.gamma + (self.Nw + 1) * self.sig[m])

                # NLMS-style update; factor 2 from the gradient of |e|^2.
                self.w_sb[m, :] = self.w_sb[m, :] + 2.0 * mu_n * np.conj(esb[m]) * self.x_cl[m, :]

            if return_internal_states and sig_hist is not None:
                sig_hist[k, :] = self.sig

        # Tail samples that do not fill a block are left unfiltered.
        if n_used < n_samples:
            outputs[n_used:] = 0.0
            errors[n_used:] = d[n_used:] - outputs[n_used:]

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[DLCLLMS] Completed in {runtime_s * 1000:.03f} ms | blocks={n_blocks} | used={n_used}/{n_samples}")

        extra: Dict[str, Any] = {
            "n_blocks": int(n_blocks),
            "block_len": int(L),
            "n_used": int(n_used),
        }
        if return_internal_states:
            extra.update(
                {
                    "sig_history": sig_hist,
                    "w_sb_final": self.w_sb.copy(),
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Implements the Delayless Closed-Loop Subband LMS adaptive-filtering algorithm (DLCLLMS) for real-valued fullband data. (Algorithm 12.3, Diniz)
Notes
- Processing is block-based with block length L = M (number of subbands).
- Internally uses complex subband signals (DFT analysis bank).
- The mapped equivalent fullband FIR GG (length M*Nw) is exposed via `self.w` (float).
- For compatibility with the base class, `OptimizationResult.coefficients` returns `self.w_history`, which stores the mapped GG once per processed block.
def __init__(
    self,
    filter_order: int = 5,
    n_subbands: int = 4,
    step: float = 0.1,
    gamma: float = 1e-2,
    a: float = 1e-2,
    nyquist_len: int = 2,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """
    Build a DLCLLMS filter with M = n_subbands subbands and subband
    order Nw = filter_order (each subband carries Nw+1 taps).

    `w_init` may be a (M, Nw+1) subband matrix or a flat vector of
    length M*(Nw+1); anything else raises ValueError.
    """
    self.M: int = int(n_subbands)
    if self.M <= 0:
        raise ValueError("n_subbands must be a positive integer.")

    self.Nw: int = int(filter_order)
    if self.Nw <= 0:
        raise ValueError("filter_order must be a positive integer.")

    self.step: float = float(step)
    self.gamma: float = float(gamma)
    self.a: float = float(a)

    self.nyquist_len: int = int(nyquist_len)
    if self.nyquist_len <= 0:
        raise ValueError("nyquist_len must be a positive integer.")

    # The mapped equivalent fullband FIR has M*Nw taps.
    self._full_len: int = int(self.M * self.Nw)

    super().__init__(filter_order=self._full_len - 1, w_init=None)

    # Analysis machinery: polyphase Nyquist bank, its length/group delay,
    # and the DFT matrix.
    self.Ed: np.ndarray = _design_polyphase_nyquist_bank(self.M, self.nyquist_len)
    self._P: int = int(self.Ed.shape[1])
    self._Dint: int = int((self._P - 1) // 2)

    self.F: np.ndarray = _dft_matrix(self.M)

    sb_shape = (self.M, self.Nw + 1)
    self.w_sb: np.ndarray = np.zeros(sb_shape, dtype=complex)
    if w_init is not None:
        given = np.asarray(w_init)
        if given.ndim == 2 and given.shape == sb_shape:
            self.w_sb = given.astype(complex, copy=True)
        else:
            given = given.reshape(-1)
            if given.size != self.M * (self.Nw + 1):
                raise ValueError(
                    f"w_init has incompatible size. Expected {self.M*(self.Nw+1)} "
                    f"or shape ({self.M},{self.Nw+1}), got {given.size}."
                )
            self.w_sb = given.reshape(sb_shape).astype(complex, copy=True)

    # Per-subband regressor, smoothed power, fractional-delay states and
    # the fullband FIR state.
    self.x_cl: np.ndarray = np.zeros(sb_shape, dtype=complex)
    self.sig: np.ndarray = np.zeros((self.M,), dtype=float)
    self._xx_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
    self._ee_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
    self._x_state: np.ndarray = np.zeros((max(self._full_len - 1, 0),), dtype=float)

    self.w_history = []
    self._record_history()
144 def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None: 145 """ 146 Reset coefficients and history. 147 148 - If w_new is provided: 149 * If shape (M, Nw+1): interpreted as subband coefficients. 150 * If flat of length M*(Nw+1): reshaped as subband coefficients. 151 - Resets internal states (x_cl, sig, fractional-delay, FIR state). 152 """ 153 if w_new is None: 154 self.w_sb = np.zeros((self.M, self.Nw + 1), dtype=complex) 155 else: 156 w0 = np.asarray(w_new) 157 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 158 self.w_sb = w0.astype(complex, copy=True) 159 else: 160 w0 = w0.reshape(-1) 161 if w0.size != self.M * (self.Nw + 1): 162 raise ValueError( 163 f"w_new has incompatible size. Expected {self.M*(self.Nw+1)} " 164 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 165 ) 166 self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True) 167 168 self.x_cl = np.zeros((self.M, self.Nw + 1), dtype=complex) 169 self.sig = np.zeros((self.M,), dtype=float) 170 self._xx_frac = np.zeros((self._P, self.M), dtype=float) 171 self._ee_frac = np.zeros((self._P, self.M), dtype=float) 172 self._x_state = np.zeros((max(self._full_len - 1, 0),), dtype=float) 173 174 GG = self._equivalent_fullband() 175 self.w = GG.astype(float, copy=True) 176 self.w_history = [] 177 self._record_history()
Reset coefficients and history.
- If w_new is provided:
- If shape (M, Nw+1): interpreted as subband coefficients.
- If flat of length M*(Nw+1): reshaped as subband coefficients.
- Resets internal states (x_cl, sig, fractional-delay, FIR state).
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the adaptation process for the DLCLLMS algorithm (faithful to dlcllms.m).

        Processes the input in non-overlapping blocks of length M (one block
        per subband-domain iteration); any trailing partial block is left
        unfiltered.

        Returns
        -------
        OptimizationResult
            outputs:
                Estimated fullband output y[n] (real), same length as input_signal.
            errors:
                Fullband error e[n] = d[n] - y[n] (real).
            coefficients:
                History of equivalent fullband FIR vectors GG (length M*Nw),
                stored once per processed block (plus the initial entry).

        Extra (always)
        --------------
        extra["n_blocks"]:
            Number of processed blocks.
        extra["block_len"]:
            Block length (equals M).
        extra["n_used"]:
            Number of samples actually processed (multiple of M).

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["sig_history"]:
            Smoothed subband power per block (n_blocks, M).
        extra["w_sb_final"]:
            Final subband coefficient matrix (M, Nw+1), complex.
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=float).ravel()
        d = np.asarray(desired_signal, dtype=float).ravel()

        n_samples: int = int(x.size)
        M: int = int(self.M)
        # Critically sampled: the block length equals the number of subbands.
        L: int = M

        n_blocks: int = int(n_samples // L)
        n_used: int = int(n_blocks * L)

        outputs = np.zeros((n_samples,), dtype=float)
        errors = np.zeros((n_samples,), dtype=float)

        sig_hist: Optional[np.ndarray] = np.zeros((n_blocks, M), dtype=float) if return_internal_states else None

        # Restart the coefficient history with the initial snapshot.
        self.w_history = []
        self._record_history()

        if n_blocks == 0:
            # Input shorter than one block: nothing is processed, y[n] = 0.
            errors = d - outputs
            runtime_s: float = float(time() - tic)
            extra: Dict[str, Any] = {"n_blocks": 0, "block_len": L, "n_used": 0}
            return self._pack_results(
                outputs=outputs,
                errors=errors,
                runtime_s=runtime_s,
                error_type="output_error",
                extra=extra,
            )

        for k in range(n_blocks):
            i0 = k * L
            i1 = i0 + L

            x_block = x[i0:i1]
            d_block = d[i0:i1]

            # Newest sample first, matching the delay-line ordering below.
            x_p = x_block[::-1]

            # Push the block through the per-subband delay lines and apply
            # Ed (presumably per-subband fractional-delay filters set up by
            # the constructor — TODO confirm against the class __init__).
            x_frac = np.zeros((M,), dtype=float)
            for m in range(M):
                self._xx_frac[1:, m] = self._xx_frac[:-1, m]
                self._xx_frac[0, m] = x_p[m]
                x_frac[m] = float(np.dot(self.Ed[m, :], self._xx_frac[:, m]))

            # Analysis transform into the (complex) subband domain.
            xsb = self.F @ x_frac.astype(complex)

            # The fullband output is produced by the equivalent FIR filter
            # derived from the current subband coefficients.
            GG = self._equivalent_fullband()
            y_block = self._fir_block(GG, x_block)

            outputs[i0:i1] = y_block
            e_block = d_block - y_block
            errors[i0:i1] = e_block

            # Snapshot the equivalent fullband coefficients once per block.
            self.w = GG.astype(float, copy=True)
            self._record_history()

            # Same delay-line + analysis pipeline applied to the error block.
            e_p = e_block[::-1]
            e_frac = np.zeros((M,), dtype=float)
            for m in range(M):
                self._ee_frac[1:, m] = self._ee_frac[:-1, m]
                self._ee_frac[0, m] = e_p[m]
                e_frac[m] = float(np.dot(self.Ed[m, :], self._ee_frac[:, m]))

            esb = self.F @ e_frac.astype(complex)

            for m in range(M):
                # Shift the closed-loop regressor of subband m.
                self.x_cl[m, 1:] = self.x_cl[m, :-1]
                self.x_cl[m, 0] = xsb[m]

                # Exponentially smoothed subband input power (factor a).
                self.sig[m] = (1.0 - self.a) * self.sig[m] + self.a * (np.abs(xsb[m]) ** 2)

                # Power-normalized step; gamma guards against division by ~0.
                mu_n = self.step / (self.gamma + (self.Nw + 1) * self.sig[m])

                # Normalized-LMS update of the subband coefficients.
                self.w_sb[m, :] = self.w_sb[m, :] + 2.0 * mu_n * np.conj(esb[m]) * self.x_cl[m, :]

            if return_internal_states and sig_hist is not None:
                sig_hist[k, :] = self.sig

        if n_used < n_samples:
            # Trailing partial block is left unfiltered (y = 0 there).
            outputs[n_used:] = 0.0
            errors[n_used:] = d[n_used:] - outputs[n_used:]

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[DLCLLMS] Completed in {runtime_s * 1000:.03f} ms | blocks={n_blocks} | used={n_used}/{n_samples}")

        extra: Dict[str, Any] = {
            "n_blocks": int(n_blocks),
            "block_len": int(L),
            "n_used": int(n_used),
        }
        if return_internal_states:
            extra.update(
                {
                    "sig_history": sig_hist,
                    "w_sb_final": self.w_sb.copy(),
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Executes the adaptation process for the DLCLLMS algorithm (faithful to dlcllms.m).
Returns
OptimizationResult outputs: Estimated fullband output y[n] (real), same length as input_signal. errors: Fullband error e[n] = d[n] - y[n] (real). coefficients: History of equivalent fullband FIR vectors GG (length M*Nw), stored once per processed block (plus the initial entry).
Extra (always)
extra["n_blocks"]: Number of processed blocks. extra["block_len"]: Block length (equals M). extra["n_used"]: Number of samples actually processed (multiple of M).
Extra (when return_internal_states=True)
extra["sig_history"]: Smoothed subband power per block (n_blocks, M). extra["w_sb_final"]: Final subband coefficient matrix (M, Nw+1), complex.
class OLSBLMS(AdaptiveFilter):
    """
    Implements the Open-Loop Subband LMS (OLSBLMS) adaptive-filtering algorithm for real-valued data.
    (Algorithm 12.1, Diniz)

    Notes
    -----
    - The adaptive coefficients are subband-wise: w has shape (M, Nw+1).
    - For compatibility with the base class, `OptimizationResult.coefficients` will contain
      a flattened history of the subband coefficient matrix (row-major flatten).
      The full matrix history is provided in `extra["w_matrix_history"]`.
    - The MATLAB reference typically evaluates MSE in subbands; here we also provide a
      convenience fullband reconstruction via the synthesis bank.
    """
    supports_complex: bool = False

    # Bank dimensions and hyper-parameters (assigned in __init__).
    M: int        # number of subbands
    Nw: int       # subband filter order (Nw+1 taps per subband)
    L: int        # decimation factor (defaults to M: critical sampling)
    step: float   # LMS step size
    gamma: float  # regularization in the normalized-step denominator
    a: float      # smoothing factor of the subband power estimate

    def __init__(
        self,
        n_subbands: int,
        analysis_filters: ArrayLike,
        synthesis_filters: ArrayLike,
        filter_order: int,
        step: float = 0.1,
        gamma: float = 1e-2,
        a: float = 0.01,
        decimation_factor: Optional[int] = None,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Build an OLSBLMS filter from its analysis/synthesis filter banks.

        Raises
        ------
        ValueError
            If any dimension/hyper-parameter is invalid or the filter banks
            do not have M rows, or if ``w_init`` has an incompatible layout.
        """
        self.M = int(n_subbands)
        if self.M <= 0:
            raise ValueError("n_subbands must be a positive integer.")

        self.Nw = int(filter_order)
        if self.Nw <= 0:
            raise ValueError("filter_order must be a positive integer.")

        self.step = float(step)
        self.gamma = float(gamma)
        self.a = float(a)

        hk = np.asarray(analysis_filters, dtype=float)
        fk = np.asarray(synthesis_filters, dtype=float)

        if hk.ndim != 2 or fk.ndim != 2:
            raise ValueError("analysis_filters and synthesis_filters must be 2D arrays with shape (M, Lh/Lf).")
        if hk.shape[0] != self.M or fk.shape[0] != self.M:
            raise ValueError(
                f"Filterbanks must have M rows. Got hk.shape[0]={hk.shape[0]}, fk.shape[0]={fk.shape[0]}, M={self.M}."
            )

        self.hk = hk  # analysis bank, one row per subband
        self.fk = fk  # synthesis bank, one row per subband

        # Critical sampling (L == M) unless the caller overrides it.
        self.L = int(decimation_factor) if decimation_factor is not None else self.M
        if self.L <= 0:
            raise ValueError("decimation_factor L must be a positive integer.")

        # The base class tracks a flat vector of all M*(Nw+1) coefficients.
        self._n_params = int(self.M * (self.Nw + 1))
        super().__init__(filter_order=self._n_params - 1, w_init=None)

        self.w_mat: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=float)
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=float)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.w_mat = w0.copy()
            elif w0.ndim == 1 and w0.size == self._n_params:
                self.w_mat = w0.reshape(self.M, self.Nw + 1).copy()
            else:
                raise ValueError(
                    "w_init must have shape (M, Nw+1) or be a flat vector of length M*(Nw+1). "
                    f"Got w_init.shape={w0.shape}."
                )

        self.w = self.w_mat.reshape(-1).astype(float, copy=False)
        self.w_history = []
        self._record_history()

        # Full (M, Nw+1) snapshots, one per subband iteration of optimize().
        self.w_matrix_history: list[np.ndarray] = []

    def _sync_base_w(self) -> None:
        """Keep base `self.w` consistent with the subband matrix."""
        self.w = self.w_mat.reshape(-1).astype(float, copy=False)

    @classmethod
    def default_test_init_kwargs(cls, order: int) -> dict:
        """Return init kwargs for a trivial single-band instance (identity banks) used by standardized tests."""
        M = 1
        hk = np.array([[1.0]], dtype=float)
        fk = np.array([[1.0]], dtype=float)
        return dict(
            n_subbands=M,
            analysis_filters=hk,
            synthesis_filters=fk,
            filter_order=order,
            step=0.1,
            gamma=1e-2,
            a=0.01,
            decimation_factor=1,
        )

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the adaptation process for the Open-Loop Subband LMS (OLSBLMS) algorithm.

        Returns
        -------
        OptimizationResult
            outputs:
                Fullband reconstructed output y(n), same length as desired_signal.
            errors:
                Fullband output error e(n) = d(n) - y(n).
            coefficients:
                Flattened coefficient history (shape: (#snapshots, M*(Nw+1))).

        Extra (always)
        --------------
        extra["w_matrix_history"]:
            List of coefficient matrices (M, Nw+1), one per subband-iteration.
        extra["subband_outputs"], extra["subband_errors"]:
            Arrays with shape (M, N_iter).
        extra["mse_subbands"], extra["mse_overall"]:
            MSE curves in subbands (instantaneous squared errors).

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["sig_ol"]:
            Final subband energy estimates (M,).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=float).ravel()
        d = np.asarray(desired_signal, dtype=float).ravel()

        n_samples: int = int(x.size)

        # Open-loop: both x and d are filtered by the analysis bank and
        # decimated by L before any adaptation takes place.
        xsb_list: list[np.ndarray] = []
        dsb_list: list[np.ndarray] = []
        for m in range(self.M):
            xaux_x = _fir_filter_causal(self.hk[m, :], x)
            xaux_d = _fir_filter_causal(self.hk[m, :], d)
            xsb_list.append(_decimate_by_L(xaux_x, self.L))
            dsb_list.append(_decimate_by_L(xaux_d, self.L))

        # All subbands are truncated to the shortest decimated length.
        N_iter: int = min(arr.size for arr in (xsb_list + dsb_list)) if (xsb_list and dsb_list) else 0
        if N_iter == 0:
            # Degenerate input: return zero output and empty diagnostics.
            y0 = np.zeros_like(d)
            runtime_s = float(time() - tic)
            return self._pack_results(
                outputs=y0,
                errors=d - y0,
                runtime_s=runtime_s,
                error_type="output_error",
                extra={
                    "w_matrix_history": [],
                    "subband_outputs": np.zeros((self.M, 0), dtype=float),
                    "subband_errors": np.zeros((self.M, 0), dtype=float),
                    "mse_subbands": np.zeros((self.M, 0), dtype=float),
                    "mse_overall": np.zeros((0,), dtype=float),
                },
            )

        xsb = np.vstack([arr[:N_iter] for arr in xsb_list])
        dsb = np.vstack([arr[:N_iter] for arr in dsb_list])

        y_sb = np.zeros((self.M, N_iter), dtype=float)
        e_sb = np.zeros((self.M, N_iter), dtype=float)

        # Per-subband regressors and smoothed power estimates.
        x_ol = np.zeros((self.M, self.Nw + 1), dtype=float)
        sig_ol = np.zeros((self.M,), dtype=float)

        self.w_history = []
        self._record_history()
        self.w_matrix_history = []

        for k in range(N_iter):
            for m in range(self.M):
                # Shift the delay line of subband m and insert the new sample.
                x_ol[m, 1:] = x_ol[m, :-1]
                x_ol[m, 0] = xsb[m, k]

                y_sb[m, k] = float(np.dot(self.w_mat[m, :], x_ol[m, :]))
                e_sb[m, k] = float(dsb[m, k] - y_sb[m, k])

                # Exponentially smoothed input power of subband m.
                sig_ol[m] = (1.0 - self.a) * sig_ol[m] + self.a * (xsb[m, k] ** 2)

                # Power-normalized step (gamma avoids division by ~0).
                mu_m = (2.0 * self.step) / (self.gamma + (self.Nw + 1) * sig_ol[m])

                self.w_mat[m, :] = self.w_mat[m, :] + mu_m * e_sb[m, k] * x_ol[m, :]

            # One snapshot per subband iteration: full matrix + flat vector.
            self.w_matrix_history.append(self.w_mat.copy())
            self._sync_base_w()
            self._record_history()

        # Convenience fullband reconstruction via the synthesis bank.
        y_full = np.zeros((n_samples,), dtype=float)
        for m in range(self.M):
            y_up = _upsample_by_L(y_sb[m, :], self.L, n_samples)
            y_full += _fir_filter_causal(self.fk[m, :], y_up)

        e_full = d - y_full

        # Instantaneous squared errors; ensemble averaging is the caller's job.
        mse_subbands = e_sb ** 2
        mse_overall = np.mean(mse_subbands, axis=0)

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[OLSBLMS] Completed in {runtime_s * 1000:.03f} ms | iters={N_iter}")

        extra: Dict[str, Any] = {
            "w_matrix_history": self.w_matrix_history,
            "subband_outputs": y_sb,
            "subband_errors": e_sb,
            "mse_subbands": mse_subbands,
            "mse_overall": mse_overall,
        }
        if return_internal_states:
            extra["sig_ol"] = sig_ol.copy()

        return self._pack_results(
            outputs=y_full,
            errors=e_full,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Implements the Open-Loop Subband LMS (OLSBLMS) adaptive-filtering algorithm for real-valued data. (Algorithm 12.1, Diniz)
Notes
- The adaptive coefficients are subband-wise: w has shape (M, Nw+1).
- For compatibility with the base class, `OptimizationResult.coefficients` will contain a flattened history of the subband coefficient matrix (row-major flatten). The full matrix history is provided in `extra["w_matrix_history"]`.
- The MATLAB reference typically evaluates MSE in subbands; here we also provide a convenience fullband reconstruction via the synthesis bank.
73 def __init__( 74 self, 75 n_subbands: int, 76 analysis_filters: ArrayLike, 77 synthesis_filters: ArrayLike, 78 filter_order: int, 79 step: float = 0.1, 80 gamma: float = 1e-2, 81 a: float = 0.01, 82 decimation_factor: Optional[int] = None, 83 w_init: Optional[Union[np.ndarray, list]] = None, 84 ) -> None: 85 self.M = int(n_subbands) 86 if self.M <= 0: 87 raise ValueError("n_subbands must be a positive integer.") 88 89 self.Nw = int(filter_order) 90 if self.Nw <= 0: 91 raise ValueError("filter_order must be a positive integer.") 92 93 self.step = float(step) 94 self.gamma = float(gamma) 95 self.a = float(a) 96 97 hk = np.asarray(analysis_filters, dtype=float) 98 fk = np.asarray(synthesis_filters, dtype=float) 99 100 if hk.ndim != 2 or fk.ndim != 2: 101 raise ValueError("analysis_filters and synthesis_filters must be 2D arrays with shape (M, Lh/Lf).") 102 if hk.shape[0] != self.M or fk.shape[0] != self.M: 103 raise ValueError( 104 f"Filterbanks must have M rows. Got hk.shape[0]={hk.shape[0]}, fk.shape[0]={fk.shape[0]}, M={self.M}." 105 ) 106 107 self.hk = hk 108 self.fk = fk 109 110 self.L = int(decimation_factor) if decimation_factor is not None else self.M 111 if self.L <= 0: 112 raise ValueError("decimation_factor L must be a positive integer.") 113 114 self._n_params = int(self.M * (self.Nw + 1)) 115 super().__init__(filter_order=self._n_params - 1, w_init=None) 116 117 self.w_mat: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=float) 118 if w_init is not None: 119 w0 = np.asarray(w_init, dtype=float) 120 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 121 self.w_mat = w0.copy() 122 elif w0.ndim == 1 and w0.size == self._n_params: 123 self.w_mat = w0.reshape(self.M, self.Nw + 1).copy() 124 else: 125 raise ValueError( 126 "w_init must have shape (M, Nw+1) or be a flat vector of length M*(Nw+1). " 127 f"Got w_init.shape={w0.shape}." 
128 ) 129 130 self.w = self.w_mat.reshape(-1).astype(float, copy=False) 131 self.w_history = [] 132 self._record_history() 133 134 self.w_matrix_history: list[np.ndarray] = []
140 @classmethod 141 def default_test_init_kwargs(cls, order: int) -> dict: 142 M = 1 143 hk = np.array([[1.0]], dtype=float) 144 fk = np.array([[1.0]], dtype=float) 145 return dict( 146 n_subbands=M, 147 analysis_filters=hk, 148 synthesis_filters=fk, 149 filter_order=order, 150 step=0.1, 151 gamma=1e-2, 152 a=0.01, 153 decimation_factor=1, 154 )
Override in subclasses to provide the init kwargs used by standardized tests.
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the adaptation process for the Open-Loop Subband LMS (OLSBLMS) algorithm.

        Returns
        -------
        OptimizationResult
            outputs:
                Fullband reconstructed output y(n), same length as desired_signal.
            errors:
                Fullband output error e(n) = d(n) - y(n).
            coefficients:
                Flattened coefficient history (shape: (#snapshots, M*(Nw+1))).

        Extra (always)
        --------------
        extra["w_matrix_history"]:
            List of coefficient matrices (M, Nw+1), one per subband-iteration.
        extra["subband_outputs"], extra["subband_errors"]:
            Arrays with shape (M, N_iter).
        extra["mse_subbands"], extra["mse_overall"]:
            MSE curves in subbands (instantaneous squared errors).

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["sig_ol"]:
            Final subband energy estimates (M,).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=float).ravel()
        d = np.asarray(desired_signal, dtype=float).ravel()

        n_samples: int = int(x.size)

        # Open-loop decomposition: analysis-filter then decimate x and d.
        xsb_list: list[np.ndarray] = []
        dsb_list: list[np.ndarray] = []
        for m in range(self.M):
            xaux_x = _fir_filter_causal(self.hk[m, :], x)
            xaux_d = _fir_filter_causal(self.hk[m, :], d)
            xsb_list.append(_decimate_by_L(xaux_x, self.L))
            dsb_list.append(_decimate_by_L(xaux_d, self.L))

        # Truncate every subband to the shortest decimated length.
        N_iter: int = min(arr.size for arr in (xsb_list + dsb_list)) if (xsb_list and dsb_list) else 0
        if N_iter == 0:
            # Degenerate input: zero output and empty diagnostics.
            y0 = np.zeros_like(d)
            runtime_s = float(time() - tic)
            return self._pack_results(
                outputs=y0,
                errors=d - y0,
                runtime_s=runtime_s,
                error_type="output_error",
                extra={
                    "w_matrix_history": [],
                    "subband_outputs": np.zeros((self.M, 0), dtype=float),
                    "subband_errors": np.zeros((self.M, 0), dtype=float),
                    "mse_subbands": np.zeros((self.M, 0), dtype=float),
                    "mse_overall": np.zeros((0,), dtype=float),
                },
            )

        xsb = np.vstack([arr[:N_iter] for arr in xsb_list])
        dsb = np.vstack([arr[:N_iter] for arr in dsb_list])

        y_sb = np.zeros((self.M, N_iter), dtype=float)
        e_sb = np.zeros((self.M, N_iter), dtype=float)

        # Per-subband delay lines and smoothed power estimates.
        x_ol = np.zeros((self.M, self.Nw + 1), dtype=float)
        sig_ol = np.zeros((self.M,), dtype=float)

        self.w_history = []
        self._record_history()
        self.w_matrix_history = []

        for k in range(N_iter):
            for m in range(self.M):
                # Shift subband-m regressor and insert the newest sample.
                x_ol[m, 1:] = x_ol[m, :-1]
                x_ol[m, 0] = xsb[m, k]

                y_sb[m, k] = float(np.dot(self.w_mat[m, :], x_ol[m, :]))
                e_sb[m, k] = float(dsb[m, k] - y_sb[m, k])

                # Exponentially smoothed input power of subband m.
                sig_ol[m] = (1.0 - self.a) * sig_ol[m] + self.a * (xsb[m, k] ** 2)

                # Power-normalized step (gamma avoids division by ~0).
                mu_m = (2.0 * self.step) / (self.gamma + (self.Nw + 1) * sig_ol[m])

                self.w_mat[m, :] = self.w_mat[m, :] + mu_m * e_sb[m, k] * x_ol[m, :]

            # One snapshot per subband iteration (matrix + flat vector).
            self.w_matrix_history.append(self.w_mat.copy())
            self._sync_base_w()
            self._record_history()

        # Fullband reconstruction: upsample each subband output and pass it
        # through the corresponding synthesis filter.
        y_full = np.zeros((n_samples,), dtype=float)
        for m in range(self.M):
            y_up = _upsample_by_L(y_sb[m, :], self.L, n_samples)
            y_full += _fir_filter_causal(self.fk[m, :], y_up)

        e_full = d - y_full

        # Instantaneous squared errors (ensemble averaging is left to caller).
        mse_subbands = e_sb ** 2
        mse_overall = np.mean(mse_subbands, axis=0)

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[OLSBLMS] Completed in {runtime_s * 1000:.03f} ms | iters={N_iter}")

        extra: Dict[str, Any] = {
            "w_matrix_history": self.w_matrix_history,
            "subband_outputs": y_sb,
            "subband_errors": e_sb,
            "mse_subbands": mse_subbands,
            "mse_overall": mse_overall,
        }
        if return_internal_states:
            extra["sig_ol"] = sig_ol.copy()

        return self._pack_results(
            outputs=y_full,
            errors=e_full,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Executes the adaptation process for the Open-Loop Subband LMS (OLSBLMS) algorithm.
Returns
OptimizationResult outputs: Fullband reconstructed output y(n), same length as desired_signal. errors: Fullband output error e(n) = d(n) - y(n). coefficients: Flattened coefficient history (shape: (#snapshots, M*(Nw+1))).
Extra (always)
extra["w_matrix_history"]: List of coefficient matrices (M, Nw+1), one per subband-iteration. extra["subband_outputs"], extra["subband_errors"]: Arrays with shape (M, N_iter). extra["mse_subbands"], extra["mse_overall"]: MSE curves in subbands.
Extra (when return_internal_states=True)
extra["sig_ol"]: Final subband energy estimates (M,).
class AffineProjectionCM(AdaptiveFilter):
    """
    Implements the Affine-Projection Constant-Modulus (AP-CM) algorithm
    for blind adaptive filtering.

    Notes
    -----
    - This is a BLIND algorithm: it does not require desired_signal.
    - We still accept `desired_signal=None` in `optimize` to keep a unified API.
    """
    supports_complex: bool = True

    # Hyper-parameters (assigned in __init__).
    step_size: float    # adaptation step
    memory_length: int  # number of past regressors reused (L)
    gamma: float        # regularization of the projection matrix
    n_coeffs: int       # filter_order + 1

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.1,
        memory_length: int = 2,
        gamma: float = 1e-6,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """Configure the AP-CM filter; coefficients are handled by the base class."""
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)
        self.memory_length = int(memory_length)
        self.gamma = float(gamma)
        self.n_coeffs = int(filter_order + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Affine-Projection Constant-Modulus (AP-CM) algorithm.

        Parameters
        ----------
        input_signal:
            The input signal to be filtered.
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns selected internal values in `extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                y[k] = first component of the projection output vector.
            errors:
                e[k] = first component of the CM error vector.
            coefficients:
                coefficient history stored in the base class.
            error_type:
                set to "blind_constant_modulus".

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["last_update_factor"]:
            Solution of the (regularized) linear system at the last iteration.
        extra["last_regressor_matrix"]:
            Final regressor matrix (shape n_coeffs x (memory_length+1)).
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=complex)

        L: int = int(self.memory_length)

        # Columns hold the L+1 most recent regressors (newest in column 0).
        regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, L + 1), dtype=complex)
        I_reg: np.ndarray = (self.gamma * np.eye(L + 1)).astype(complex)

        # Zero prefix so the first regressors are well-defined.
        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
        x_padded[self.filter_order:] = x

        last_update_factor: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Shift old regressors right and insert the newest one.
            regressor_matrix[:, 1:] = regressor_matrix[:, :-1]
            regressor_matrix[:, 0] = x_padded[k : k + self.filter_order + 1][::-1]

            # Output vector over the projection window: y = X^H w.
            output_ap: np.ndarray = np.dot(np.conj(regressor_matrix).T, self.w)

            # Constant-modulus target: project each output onto the unit
            # circle; zero is kept where |y| is numerically zero.
            abs_out: np.ndarray = np.abs(output_ap)
            desired_level: np.ndarray = np.zeros_like(output_ap, dtype=complex)
            np.divide(output_ap, abs_out, out=desired_level, where=abs_out > 1e-12)

            error_ap: np.ndarray = desired_level - output_ap

            # Regularized normal equations of the affine projection.
            phi: np.ndarray = np.dot(np.conj(regressor_matrix).T, regressor_matrix) + I_reg
            update_factor: np.ndarray = np.linalg.solve(phi, error_ap)
            last_update_factor = update_factor

            self.w = self.w + self.step_size * np.dot(regressor_matrix, update_factor)

            # Only the most recent (column-0) component is reported.
            outputs[k] = output_ap[0]
            errors[k] = error_ap[0]

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[AffineProjectionCM] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_update_factor": last_update_factor,
                "last_regressor_matrix": regressor_matrix.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_constant_modulus",
            extra=extra,
        )
Implements the Affine-Projection Constant-Modulus (AP-CM) algorithm for blind adaptive filtering.
Notes
- This is a BLIND algorithm: it does not require desired_signal.
- We still accept `desired_signal=None` in `optimize` to keep a unified API.
42 def __init__( 43 self, 44 filter_order: int = 5, 45 step_size: float = 0.1, 46 memory_length: int = 2, 47 gamma: float = 1e-6, 48 w_init: Optional[Union[np.ndarray, list]] = None, 49 ) -> None: 50 super().__init__(filter_order, w_init=w_init) 51 self.step_size = float(step_size) 52 self.memory_length = int(memory_length) 53 self.gamma = float(gamma) 54 self.n_coeffs = int(filter_order + 1)
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Affine-Projection Constant-Modulus (AP-CM) algorithm.

        Parameters
        ----------
        input_signal:
            The input signal to be filtered.
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns selected internal values in `extra`.

        Returns
        -------
        OptimizationResult
            outputs:
                y[k] = first component of the projection output vector.
            errors:
                e[k] = first component of the CM error vector.
            coefficients:
                coefficient history stored in the base class.
            error_type:
                set to "blind_constant_modulus".

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["last_update_factor"]:
            Solution of the (regularized) linear system at the last iteration.
        extra["last_regressor_matrix"]:
            Final regressor matrix (shape n_coeffs x (memory_length+1)).
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=complex)

        L: int = int(self.memory_length)

        # Columns hold the L+1 most recent regressors (newest in column 0).
        regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, L + 1), dtype=complex)
        I_reg: np.ndarray = (self.gamma * np.eye(L + 1)).astype(complex)

        # Zero prefix so the earliest regressors are well-defined.
        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
        x_padded[self.filter_order:] = x

        last_update_factor: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Shift old regressors right, insert the newest in column 0.
            regressor_matrix[:, 1:] = regressor_matrix[:, :-1]
            regressor_matrix[:, 0] = x_padded[k : k + self.filter_order + 1][::-1]

            # Output vector over the projection window: y = X^H w.
            output_ap: np.ndarray = np.dot(np.conj(regressor_matrix).T, self.w)

            # CM target: each output projected onto the unit circle; where
            # |y| is numerically zero the target stays zero (safe divide).
            abs_out: np.ndarray = np.abs(output_ap)
            desired_level: np.ndarray = np.zeros_like(output_ap, dtype=complex)
            np.divide(output_ap, abs_out, out=desired_level, where=abs_out > 1e-12)

            error_ap: np.ndarray = desired_level - output_ap

            # Regularized normal equations of the affine projection.
            phi: np.ndarray = np.dot(np.conj(regressor_matrix).T, regressor_matrix) + I_reg
            update_factor: np.ndarray = np.linalg.solve(phi, error_ap)
            last_update_factor = update_factor

            self.w = self.w + self.step_size * np.dot(regressor_matrix, update_factor)

            # Report only the newest (column-0) component per sample.
            outputs[k] = output_ap[0]
            errors[k] = error_ap[0]

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[AffineProjectionCM] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_update_factor": last_update_factor,
                "last_regressor_matrix": regressor_matrix.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_constant_modulus",
            extra=extra,
        )
Executes the Affine-Projection Constant-Modulus (AP-CM) algorithm.
Parameters
input_signal:
The input signal to be filtered.
desired_signal:
Ignored (kept only for API standardization).
verbose:
If True, prints runtime.
return_internal_states:
If True, returns selected internal values in extra.
Returns
OptimizationResult outputs: y[k] = first component of the projection output vector. errors: e[k] = first component of the CM error vector. coefficients: coefficient history stored in the base class. error_type: set to "blind_constant_modulus".
Extra (when return_internal_states=True)
extra["last_update_factor"]: Solution of the (regularized) linear system at the last iteration. extra["last_regressor_matrix"]: Final regressor matrix (shape n_coeffs x (memory_length+1)).
class CMA(AdaptiveFilter):
    """
    Implements the Constant-Modulus Algorithm (CMA) for blind adaptive filtering.

    Notes
    -----
    - This is a BLIND algorithm: it does not require desired_signal.
    - We keep `desired_signal=None` in `optimize` only for API standardization.
    """
    supports_complex: bool = True

    # Hyper-parameters (assigned in __init__).
    step_size: float  # gradient step
    n_coeffs: int     # filter_order + 1

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.01,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """Configure the CMA filter; coefficients are handled by the base class."""
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)
        self.n_coeffs = int(filter_order + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Executes the Constant-Modulus Algorithm (CMA) weight update process.

        Parameters
        ----------
        input_signal:
            Input signal to be filtered.
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes internal signals in result.extra.
        safe_eps:
            Small epsilon to avoid division by zero when estimating the dispersion constant.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                CMA error (|y[k]|^2 - R2).
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "blind_constant_modulus".

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["dispersion_constant"]:
            R2 used by CMA.
        extra["instantaneous_phi"]:
            Trajectory of phi[k] = 2*e[k]*conj(y[k]) (complex), length N.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=float)
        # Dispersion constant R2 = E|x|^4 / E|x|^2, estimated from the
        # whole input; falls back to 0 for an (almost) all-zero signal.
        denom: float = float(np.mean(np.abs(x) ** 2))
        if denom < safe_eps:
            desired_level: float = 0.0
        else:
            desired_level = float(np.mean(np.abs(x) ** 4) / (denom + safe_eps))

        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None

        # Zero prefix so the earliest regressors are well-defined.
        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
        x_padded[self.filter_order:] = x

        for k in range(n_samples):
            # Regressor with the newest sample first.
            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]

            # y[k] = w^H x_k
            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
            outputs[k] = y_k

            # Real-valued CM error: deviation of |y|^2 from R2.
            e_k: float = float((np.abs(y_k) ** 2) - desired_level)
            errors[k] = e_k

            # Stochastic-gradient factor of the CM cost.
            phi_k: complex = complex(2.0 * e_k * np.conj(y_k))
            if return_internal_states and phi_track is not None:
                phi_track[k] = phi_k

            # Gradient-descent step on the CM cost.
            self.w = self.w - self.step_size * phi_k * x_k
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[CMA] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "dispersion_constant": desired_level,
                "instantaneous_phi": phi_track,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_constant_modulus",
            extra=extra,
        )
Implements the Constant-Modulus Algorithm (CMA) for blind adaptive filtering.
Notes
- This is a BLIND algorithm: it does not require desired_signal.
- We keep `desired_signal=None` in `optimize` only for API standardization.
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Executes the Constant-Modulus Algorithm (CMA) weight update process.

        Parameters
        ----------
        input_signal:
            Input signal to be filtered.
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, includes internal signals in result.extra.
        safe_eps:
            Small epsilon to avoid division by zero when estimating the dispersion constant.

        Returns
        -------
        OptimizationResult
            outputs:
                Filter output y[k].
            errors:
                CMA error (|y[k]|^2 - R2).
            coefficients:
                History of coefficients stored in the base class.
            error_type:
                "blind_constant_modulus".

        Extra (when return_internal_states=True)
        ----------------------------------------
        extra["dispersion_constant"]:
            R2 used by CMA.
        extra["instantaneous_phi"]:
            Trajectory of phi[k] = 2*e[k]*conj(y[k]) (complex), length N.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=float)
        # Dispersion constant R2 = E|x|^4 / E|x|^2 estimated from the whole
        # input; falls back to 0 for an (almost) all-zero signal.
        denom: float = float(np.mean(np.abs(x) ** 2))
        if denom < safe_eps:
            desired_level: float = 0.0
        else:
            desired_level = float(np.mean(np.abs(x) ** 4) / (denom + safe_eps))

        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None

        # Zero prefix so the earliest regressors are well-defined.
        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
        x_padded[self.filter_order:] = x

        for k in range(n_samples):
            # Regressor with the newest sample first.
            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]

            # y[k] = w^H x_k
            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
            outputs[k] = y_k

            # Real-valued CM error: deviation of |y|^2 from R2.
            e_k: float = float((np.abs(y_k) ** 2) - desired_level)
            errors[k] = e_k

            # Stochastic-gradient factor of the CM cost.
            phi_k: complex = complex(2.0 * e_k * np.conj(y_k))
            if return_internal_states and phi_track is not None:
                phi_track[k] = phi_k

            # Gradient-descent step on the CM cost.
            self.w = self.w - self.step_size * phi_k * x_k
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[CMA] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "dispersion_constant": desired_level,
                "instantaneous_phi": phi_track,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_constant_modulus",
            extra=extra,
        )
Executes the Constant-Modulus Algorithm (CMA) weight update process.
Parameters
input_signal: Input signal to be filtered. desired_signal: Ignored (kept only for API standardization). verbose: If True, prints runtime. return_internal_states: If True, includes internal signals in result.extra. safe_eps: Small epsilon to avoid division by zero when estimating the dispersion constant.
Returns
OptimizationResult outputs: Filter output y[k]. errors: CMA error (|y[k]|^2 - R2). coefficients: History of coefficients stored in the base class. error_type: "blind_constant_modulus".
Extra (when return_internal_states=True)
extra["dispersion_constant"]: R2 used by CMA. extra["instantaneous_phi"]: Trajectory of phi[k] = 2*e[k]*conj(y[k]) (complex), length N.
class Godard(AdaptiveFilter):
    """
    Godard algorithm for blind adaptive filtering with complex-valued data.

    This is a blind adaptation criterion that does not require a desired
    signal. A `desired_signal=None` parameter is accepted only to keep a
    unified API signature across the library.
    """
    supports_complex: bool = True

    step_size: float
    p: int
    q: int
    n_coeffs: int

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.01,
        p_exponent: int = 2,
        q_exponent: int = 2,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients
            is filter_order + 1.
        step_size:
            Adaptation step size.
        p_exponent:
            Exponent p used by the Godard cost (typically p=2).
        q_exponent:
            Exponent q used by the Godard cost (typically q=2).
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)
        self.p = int(p_exponent)
        self.q = int(q_exponent)
        self.n_coeffs = int(filter_order + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Run the Godard blind adaptation loop.

        Parameters
        ----------
        input_signal:
            Input signal to be filtered.
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns internal signals in result.extra.
        safe_eps:
            Small epsilon used to avoid divisions by zero and unstable powers.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k];
            errors: Godard error defined here as e[k] = |y[k]|^q - Rq;
            coefficients: history of coefficients stored in the base class;
            error_type: "blind_godard".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["phi_gradient"]:
            Trajectory of the instantaneous gradient term used for the
            weight update, length N.
        extra["dispersion_constant"]:
            Scalar Rq used by the criterion.
        """
        start = time()

        signal = np.asarray(input_signal, dtype=complex).ravel()
        total = int(signal.size)

        # Dispersion constant Rq = E[|x|^(2q)] / E[|x|^q], guarded near zero.
        numerator = float(np.mean(np.abs(signal) ** (2 * self.q)))
        denominator = float(np.mean(np.abs(signal) ** self.q))
        rq = float(numerator / (denominator + safe_eps)) if denominator > safe_eps else 0.0

        y_out = np.zeros(total, dtype=complex)
        godard_err = np.zeros(total, dtype=float)

        phi_log = np.zeros(total, dtype=complex) if return_internal_states else None

        # Zero-prepadded delay line: the first iterations see zero samples.
        padded = np.zeros(total + self.filter_order, dtype=complex)
        padded[self.filter_order:] = signal

        for idx in range(total):
            # Regressor in newest-first order.
            regressor = padded[idx: idx + self.filter_order + 1][::-1]

            y_now = complex(np.dot(np.conj(self.w), regressor))
            y_out[idx] = y_now

            err_now = float((np.abs(y_now) ** self.q) - rq)
            godard_err[idx] = err_now

            # Instantaneous gradient; the |y|^(q-2) factor is skipped for a
            # near-zero output to avoid unstable negative powers.
            if np.abs(y_now) > safe_eps:
                grad = complex(
                    self.p
                    * self.q
                    * (err_now ** (self.p - 1))
                    * (np.abs(y_now) ** (self.q - 2))
                    * np.conj(y_now)
                )
            else:
                grad = 0.0 + 0.0j

            if return_internal_states and phi_log is not None:
                phi_log[idx] = grad

            self.w = self.w - (self.step_size * grad * regressor) / 2.0
            self._record_history()

        elapsed = float(time() - start)
        if verbose:
            print(f"[Godard] Completed in {elapsed * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "phi_gradient": phi_log,
                "dispersion_constant": rq,
            }

        return self._pack_results(
            outputs=y_out,
            errors=godard_err,
            runtime_s=elapsed,
            error_type="blind_godard",
            extra=extra,
        )
Implements the Godard algorithm for blind adaptive filtering with complex-valued data.
This is a blind adaptation criterion that does not require a desired signal.
A desired_signal=None parameter is accepted only to keep a unified API signature
across the library.
40 def __init__( 41 self, 42 filter_order: int = 5, 43 step_size: float = 0.01, 44 p_exponent: int = 2, 45 q_exponent: int = 2, 46 w_init: Optional[Union[np.ndarray, list]] = None, 47 ) -> None: 48 """ 49 Parameters 50 ---------- 51 filter_order: 52 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 53 step_size: 54 Adaptation step size. 55 p_exponent: 56 Exponent p used by the Godard cost (typically p=2). 57 q_exponent: 58 Exponent q used by the Godard cost (typically q=2). 59 w_init: 60 Optional initial coefficient vector. If None, initializes to zeros. 61 """ 62 super().__init__(filter_order, w_init=w_init) 63 self.step_size = float(step_size) 64 self.p = int(p_exponent) 65 self.q = int(q_exponent) 66 self.n_coeffs = int(filter_order + 1)
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. step_size: Adaptation step size. p_exponent: Exponent p used by the Godard cost (typically p=2). q_exponent: Exponent q used by the Godard cost (typically q=2). w_init: Optional initial coefficient vector. If None, initializes to zeros.
68 def optimize( 69 self, 70 input_signal: Union[np.ndarray, list], 71 desired_signal: Optional[Union[np.ndarray, list]] = None, 72 verbose: bool = False, 73 return_internal_states: bool = False, 74 safe_eps: float = 1e-12, 75 ) -> OptimizationResult: 76 """ 77 Executes the Godard adaptive algorithm. 78 79 Parameters 80 ---------- 81 input_signal: 82 Input signal to be filtered. 83 desired_signal: 84 Ignored (kept only for API standardization). 85 verbose: 86 If True, prints runtime. 87 return_internal_states: 88 If True, returns internal signals in result.extra. 89 safe_eps: 90 Small epsilon used to avoid divisions by zero and unstable powers. 91 92 Returns 93 ------- 94 OptimizationResult 95 outputs: 96 Filter output y[k]. 97 errors: 98 Godard error defined here as: e[k] = |y[k]|^q - Rq. 99 coefficients: 100 History of coefficients stored in the base class. 101 error_type: 102 "blind_godard". 103 104 Extra (when return_internal_states=True) 105 -------------------------------------- 106 extra["phi_gradient"]: 107 Trajectory of the instantaneous gradient term used for weight update, length N. 108 extra["dispersion_constant"]: 109 Scalar Rq used by the criterion. 
110 """ 111 tic: float = time() 112 113 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 114 n_samples: int = int(x.size) 115 116 num: float = float(np.mean(np.abs(x) ** (2 * self.q))) 117 den: float = float(np.mean(np.abs(x) ** self.q)) 118 desired_level: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0 119 120 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 121 errors: np.ndarray = np.zeros(n_samples, dtype=float) 122 123 phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 124 125 x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex) 126 x_padded[self.filter_order:] = x 127 128 for k in range(n_samples): 129 x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1] 130 131 y_k: complex = complex(np.dot(np.conj(self.w), x_k)) 132 outputs[k] = y_k 133 134 e_k: float = float((np.abs(y_k) ** self.q) - desired_level) 135 errors[k] = e_k 136 137 if np.abs(y_k) > safe_eps: 138 phi_k: complex = complex( 139 self.p 140 * self.q 141 * (e_k ** (self.p - 1)) 142 * (np.abs(y_k) ** (self.q - 2)) 143 * np.conj(y_k) 144 ) 145 else: 146 phi_k = 0.0 + 0.0j 147 148 if return_internal_states and phi_track is not None: 149 phi_track[k] = phi_k 150 151 self.w = self.w - (self.step_size * phi_k * x_k) / 2.0 152 self._record_history() 153 154 runtime_s: float = float(time() - tic) 155 if verbose: 156 print(f"[Godard] Completed in {runtime_s * 1000:.02f} ms") 157 158 extra: Optional[Dict[str, Any]] = None 159 if return_internal_states: 160 extra = { 161 "phi_gradient": phi_track, 162 "dispersion_constant": desired_level, 163 } 164 165 return self._pack_results( 166 outputs=outputs, 167 errors=errors, 168 runtime_s=runtime_s, 169 error_type="blind_godard", 170 extra=extra, 171 )
Executes the Godard adaptive algorithm.
Parameters
input_signal: Input signal to be filtered. desired_signal: Ignored (kept only for API standardization). verbose: If True, prints runtime. return_internal_states: If True, returns internal signals in result.extra. safe_eps: Small epsilon used to avoid divisions by zero and unstable powers.
Returns
OptimizationResult outputs: Filter output y[k]. errors: Godard error defined here as: e[k] = |y[k]|^q - Rq. coefficients: History of coefficients stored in the base class. error_type: "blind_godard".
Extra (when return_internal_states=True)
extra["phi_gradient"]: Trajectory of the instantaneous gradient term used for weight update, length N. extra["dispersion_constant"]: Scalar Rq used by the criterion.
class Sato(AdaptiveFilter):
    """
    Sato algorithm for blind adaptive filtering with complex-valued data.

    Notes
    -----
    - This is a BLIND algorithm: it does not require desired_signal.
    - We keep `desired_signal=None` in `optimize` only for API standardization.
    """
    supports_complex: bool = True

    step_size: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.01,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            FIR filter order (number of taps - 1). Number of coefficients
            is filter_order + 1.
        step_size:
            Adaptation step size.
        w_init:
            Optional initial coefficient vector. If None, initializes to zeros.
        """
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)
        self.n_coeffs = int(filter_order + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Run the Sato blind adaptation loop.

        Parameters
        ----------
        input_signal:
            Input signal to be filtered.
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns internal signals in result.extra.
        safe_eps:
            Small epsilon used to avoid division by zero.

        Returns
        -------
        OptimizationResult
            outputs: filter output y[k];
            errors: Sato error defined here as e[k] = y[k] - gamma * sign(y[k]);
            coefficients: history of coefficients stored in the base class;
            error_type: "blind_sato".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["sato_sign_track"]:
            Track of sign(y[k]) = y[k]/|y[k]| (with safe handling around
            zero), length N.
        extra["dispersion_constant"]:
            Scalar gamma used by the Sato criterion.
        """
        start = time()

        signal = np.asarray(input_signal, dtype=complex).ravel()
        total = int(signal.size)

        # Sato dispersion gamma = E[|x|^2] / E[|x|], guarded near zero.
        numerator = float(np.mean(np.abs(signal) ** 2))
        denominator = float(np.mean(np.abs(signal)))
        gamma = float(numerator / (denominator + safe_eps)) if denominator > safe_eps else 0.0

        y_out = np.zeros(total, dtype=complex)
        sato_err = np.zeros(total, dtype=complex)

        sign_log = np.zeros(total, dtype=complex) if return_internal_states else None

        # Zero-prepadded delay line: the first iterations see zero samples.
        padded = np.zeros(total + self.filter_order, dtype=complex)
        padded[self.filter_order:] = signal

        for idx in range(total):
            # Regressor in newest-first order.
            regressor = padded[idx: idx + self.filter_order + 1][::-1]

            y_now = complex(np.dot(np.conj(self.w), regressor))
            y_out[idx] = y_now

            # Complex "sign": unit-modulus direction of y, zero near |y| = 0.
            modulus = float(np.abs(y_now))
            direction = (y_now / modulus) if modulus > safe_eps else (0.0 + 0.0j)

            if return_internal_states and sign_log is not None:
                sign_log[idx] = direction

            err_now = y_now - direction * gamma
            sato_err[idx] = err_now

            self.w = self.w - self.step_size * np.conj(err_now) * regressor
            self._record_history()

        elapsed = float(time() - start)
        if verbose:
            print(f"[Sato] Completed in {elapsed * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "sato_sign_track": sign_log,
                "dispersion_constant": gamma,
            }

        return self._pack_results(
            outputs=y_out,
            errors=sato_err,
            runtime_s=elapsed,
            error_type="blind_sato",
            extra=extra,
        )
Implements the Sato algorithm for blind adaptive filtering with complex-valued data.
Notes
- This is a BLIND algorithm: it does not require desired_signal.
- We keep `desired_signal=None` in `optimize` only for API standardization.
39 def __init__( 40 self, 41 filter_order: int = 5, 42 step_size: float = 0.01, 43 w_init: Optional[Union[np.ndarray, list]] = None, 44 ) -> None: 45 """ 46 Parameters 47 ---------- 48 filter_order: 49 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 50 step_size: 51 Adaptation step size. 52 w_init: 53 Optional initial coefficient vector. If None, initializes to zeros. 54 """ 55 super().__init__(filter_order, w_init=w_init) 56 self.step_size = float(step_size) 57 self.n_coeffs = int(filter_order + 1)
Parameters
filter_order: FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. step_size: Adaptation step size. w_init: Optional initial coefficient vector. If None, initializes to zeros.
59 def optimize( 60 self, 61 input_signal: Union[np.ndarray, list], 62 desired_signal: Optional[Union[np.ndarray, list]] = None, 63 verbose: bool = False, 64 return_internal_states: bool = False, 65 safe_eps: float = 1e-12, 66 ) -> OptimizationResult: 67 """ 68 Executes the Sato blind adaptive algorithm. 69 70 Parameters 71 ---------- 72 input_signal: 73 Input signal to be filtered. 74 desired_signal: 75 Ignored (kept only for API standardization). 76 verbose: 77 If True, prints runtime. 78 return_internal_states: 79 If True, returns internal signals in result.extra. 80 safe_eps: 81 Small epsilon used to avoid division by zero. 82 83 Returns 84 ------- 85 OptimizationResult 86 outputs: 87 Filter output y[k]. 88 errors: 89 Sato error defined here as: e[k] = y[k] - gamma * sign(y[k]). 90 coefficients: 91 History of coefficients stored in the base class. 92 error_type: 93 "blind_sato". 94 95 Extra (when return_internal_states=True) 96 -------------------------------------- 97 extra["sato_sign_track"]: 98 Track of sign(y[k]) = y[k]/|y[k]| (with safe handling around zero), length N. 99 extra["dispersion_constant"]: 100 Scalar gamma used by the Sato criterion. 
101 """ 102 tic: float = time() 103 104 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 105 n_samples: int = int(x.size) 106 107 num: float = float(np.mean(np.abs(x) ** 2)) 108 den: float = float(np.mean(np.abs(x))) 109 dispersion_constant: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0 110 111 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 112 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 113 114 sign_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 115 116 x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex) 117 x_padded[self.filter_order:] = x 118 119 for k in range(n_samples): 120 x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1] 121 122 y_k: complex = complex(np.dot(np.conj(self.w), x_k)) 123 outputs[k] = y_k 124 125 mag: float = float(np.abs(y_k)) 126 sato_sign: complex = (y_k / mag) if mag > safe_eps else (0.0 + 0.0j) 127 128 if return_internal_states and sign_track is not None: 129 sign_track[k] = sato_sign 130 131 e_k: complex = y_k - sato_sign * dispersion_constant 132 errors[k] = e_k 133 134 self.w = self.w - self.step_size * np.conj(e_k) * x_k 135 self._record_history() 136 137 runtime_s: float = float(time() - tic) 138 if verbose: 139 print(f"[Sato] Completed in {runtime_s * 1000:.02f} ms") 140 141 extra: Optional[Dict[str, Any]] = None 142 if return_internal_states: 143 extra = { 144 "sato_sign_track": sign_track, 145 "dispersion_constant": dispersion_constant, 146 } 147 148 return self._pack_results( 149 outputs=outputs, 150 errors=errors, 151 runtime_s=runtime_s, 152 error_type="blind_sato", 153 extra=extra, 154 )
Executes the Sato blind adaptive algorithm.
Parameters
input_signal: Input signal to be filtered. desired_signal: Ignored (kept only for API standardization). verbose: If True, prints runtime. return_internal_states: If True, returns internal signals in result.extra. safe_eps: Small epsilon used to avoid division by zero.
Returns
OptimizationResult outputs: Filter output y[k]. errors: Sato error defined here as: e[k] = y[k] - gamma * sign(y[k]). coefficients: History of coefficients stored in the base class. error_type: "blind_sato".
Extra (when return_internal_states=True)
extra["sato_sign_track"]: Track of sign(y[k]) = y[k]/|y[k]| (with safe handling around zero), length N. extra["dispersion_constant"]: Scalar gamma used by the Sato criterion.
class Kalman(AdaptiveFilter):
    """Kalman Filter for state estimation with complex or real-valued data.

    State-space model (standard form)
    ---------------------------------
        x(k)  = A(k-1) x(k-1) + B(k) n(k)
        y(k)  = C^T(k) x(k) + n1(k)

    with covariances:
        E[n(k) n(k)^H]   = Rn(k)
        E[n1(k) n1(k)^H] = Rn1(k)

    Notes
    -----
    - This class integrates the Kalman recursion into the library-wide
      `AdaptiveFilter`/`OptimizationResult` interface.
    - `self.w` stores the current state estimate x(k|k) as a 1-D vector (n,).
    - In this implementation, `OptimizationResult.coefficients` stores the
      covariance history Re(k|k) with shape (N, n, n). To achieve this using
      the standardized `_pack_results`, we store Re(k|k) snapshots in
      `self.w_history` (overriding the typical meaning of w_history for this
      non-FIR algorithm).

    Parameters
    ----------
    A:
        State transition matrix A(k-1), shape (n, n), or a sequence over k.
    C_T:
        Measurement matrix C^T(k), shape (p, n), or a sequence over k.
    Rn:
        Process noise covariance Rn(k), shape (q, q), or a sequence over k.
    Rn1:
        Measurement noise covariance Rn1(k), shape (p, p), or a sequence over k.
    B:
        Process noise input matrix B(k), shape (n, q). If None, identity is used
        (q = n).
    x_init:
        Initial state estimate x(0|0), shape (n,), (n,1) or (1,n). If None, zeros.
    Re_init:
        Initial error covariance Re(0|0), shape (n, n). If None, identity.

    Raises
    ------
    ValueError
        If the initial shapes are inconsistent.
    """

    supports_complex: bool = True

    # Model matrices: each may be a single matrix or a per-iteration sequence,
    # resolved at step k via the module helper _mat_at_k.
    A: Union[np.ndarray, Sequence[np.ndarray]]
    C_T: Union[np.ndarray, Sequence[np.ndarray]]
    Rn: Union[np.ndarray, Sequence[np.ndarray]]
    Rn1: Union[np.ndarray, Sequence[np.ndarray]]
    B: Optional[Union[np.ndarray, Sequence[np.ndarray]]]

    # Current filtered state x(k|k), shape (n, 1), and covariance Re(k|k).
    x: np.ndarray
    Re: np.ndarray

    def __init__(
        self,
        A: Union[np.ndarray, Sequence[np.ndarray]],
        C_T: Union[np.ndarray, Sequence[np.ndarray]],
        Rn: Union[np.ndarray, Sequence[np.ndarray]],
        Rn1: Union[np.ndarray, Sequence[np.ndarray]],
        B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] = None,
        x_init: Optional[np.ndarray] = None,
        Re_init: Optional[np.ndarray] = None,
    ) -> None:
        # Use the k=0 snapshot of A to fix the state dimension n.
        A0 = _mat_at_k(A, 0)
        if A0.ndim != 2 or A0.shape[0] != A0.shape[1]:
            raise ValueError(f"A must be square (n,n). Got {A0.shape}.")
        n = int(A0.shape[0])

        # Base-class order is n-1 so that filter_order + 1 == n state entries.
        super().__init__(filter_order=n - 1, w_init=None)

        self.A = A
        self.C_T = C_T
        self.Rn = Rn
        self.Rn1 = Rn1
        self.B = B

        # Common dtype across all model matrices, collapsed to either
        # float64 or complex128 for the internal state.
        dtype = np.result_type(
            A0, _mat_at_k(C_T, 0), _mat_at_k(Rn, 0), _mat_at_k(Rn1, 0)
        )
        dtype = np.float64 if np.issubdtype(dtype, np.floating) else np.complex128

        self._dtype = dtype
        self.regressor = np.zeros(self.filter_order + 1, dtype=self._dtype)
        self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)

        # Initial state x(0|0): zeros unless provided; normalized to (n, 1).
        if x_init is None:
            x0 = np.zeros((n, 1), dtype=dtype)
        else:
            x0 = _as_2d_col(np.asarray(x_init, dtype=dtype))
            if x0.shape[0] != n:
                raise ValueError(f"x_init must have length n={n}. Got {x0.shape}.")
        self.x = x0

        # Initial covariance Re(0|0): identity unless provided.
        if Re_init is None:
            Re0 = np.eye(n, dtype=dtype)
        else:
            Re0 = np.asarray(Re_init, dtype=dtype)
            if Re0.shape != (n, n):
                raise ValueError(f"Re_init must be shape (n,n)={(n,n)}. Got {Re0.shape}.")
        self.Re = Re0

        # Mirror the state estimate into self.w (library convention).
        self.w = self.x[:, 0].copy()

        self.w_history = []

    def _validate_step_shapes(
        self,
        A: np.ndarray,
        C_T: np.ndarray,
        Rn: np.ndarray,
        Rn1: np.ndarray,
        B: np.ndarray,
    ) -> None:
        """Validate per-iteration matrix shapes.

        Raises
        ------
        ValueError
            If any matrix has an unexpected shape for the current state dimension.
        """
        n = int(self.x.shape[0])
        if A.shape != (n, n):
            raise ValueError(f"A(k) must be {(n,n)}. Got {A.shape}.")
        if C_T.ndim != 2 or C_T.shape[1] != n:
            raise ValueError(f"C_T(k) must be (p,n) with n={n}. Got {C_T.shape}.")
        p = int(C_T.shape[0])
        if Rn1.shape != (p, p):
            raise ValueError(f"Rn1(k) must be {(p,p)}. Got {Rn1.shape}.")
        if B.ndim != 2 or B.shape[0] != n:
            raise ValueError(f"B(k) must be (n,q) with n={n}. Got {B.shape}.")
        q = int(B.shape[1])
        if Rn.shape != (q, q):
            raise ValueError(f"Rn(k) must be {(q,q)}. Got {Rn.shape}.")

    def optimize(
        self,
        input_signal: ArrayLike,
        desired_signal: Optional[ArrayLike] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """Execute the Kalman recursion for a sequence of measurements y(k).

        Parameters
        ----------
        input_signal:
            Measurement sequence y(k). Accepted shapes:
              - (N,) for scalar measurements
              - (N,p) for p-dimensional measurements
              - (N,p,1) also accepted (will be squeezed to (N,p))
        desired_signal:
            Ignored (kept only for API standardization).
        verbose:
            If True, prints runtime.
        return_internal_states:
            If True, returns selected internal values in `result.extra`.
        safe_eps:
            Small positive value used to regularize the innovation covariance
            matrix when a linear solve fails (numerical stabilization).

        Returns
        -------
        OptimizationResult
            outputs:
                State estimates x(k|k), shape (N, n_states).
            errors:
                Innovations e(k) = y(k) - C^T(k) x(k|k-1), shape (N, n_meas).
            coefficients:
                Covariance history Re(k|k), shape (N, n_states, n_states).
            error_type:
                "innovation".

        Extra (when return_internal_states=True)
        --------------------------------------
        extra["kalman_gain_last"]:
            Kalman gain K at the last iteration.
        extra["predicted_state_last"]:
            Predicted state x(k|k-1) at the last iteration (shape (n,)).
        extra["predicted_cov_last"]:
            Predicted covariance Re(k|k-1) at the last iteration.
        extra["innovation_cov_last"]:
            Innovation covariance S at the last iteration.
        extra["safe_eps"]:
            The stabilization epsilon used.
        """
        t0 = perf_counter()

        # Normalize measurements to a (N, p) matrix in the internal dtype.
        y_mat = _as_meas_matrix(np.asarray(input_signal))
        y_mat = y_mat.astype(self._dtype, copy=False)

        N = int(y_mat.shape[0])
        n = int(self.x.shape[0])
        p_dim = int(y_mat.shape[1])

        outputs = np.zeros((N, n), dtype=self._dtype)
        errors = np.zeros((N, p_dim), dtype=self._dtype)

        I_n = np.eye(n, dtype=self._dtype)

        # Reset covariance history for this run (coefficients in the result).
        self.w_history = []

        last_K: Optional[np.ndarray] = None
        last_x_pred: Optional[np.ndarray] = None
        last_Re_pred: Optional[np.ndarray] = None
        last_S: Optional[np.ndarray] = None

        for k in range(N):
            # Resolve the (possibly time-varying) model matrices at step k.
            A_k = np.asarray(_mat_at_k(self.A, k), dtype=self._dtype)
            C_T_k = np.asarray(_mat_at_k(self.C_T, k), dtype=self._dtype)
            Rn_k = np.asarray(_mat_at_k(self.Rn, k), dtype=self._dtype)
            Rn1_k = np.asarray(_mat_at_k(self.Rn1, k), dtype=self._dtype)
            B_k = np.asarray(
                _mat_at_k(self.B, k) if self.B is not None else I_n,
                dtype=self._dtype,
            )

            self._validate_step_shapes(A_k, C_T_k, Rn_k, Rn1_k, B_k)

            y_k = _as_2d_col(y_mat[k]).astype(self._dtype, copy=False)
            C_k = C_T_k.conj().T

            # --- Prediction step: x(k|k-1) and Re(k|k-1). ---
            x_pred = A_k @ self.x
            Re_pred = (A_k @ self.Re @ A_k.conj().T) + (B_k @ Rn_k @ B_k.conj().T)

            # Innovation: measurement minus its prediction.
            e_k = y_k - (C_T_k @ x_pred)

            # Innovation covariance S = C^T Re_pred C + Rn1.
            S = (C_T_k @ Re_pred @ C_k) + Rn1_k

            # Kalman gain K = Re_pred C S^{-1}, computed via a linear solve on
            # the (Hermitian-transposed) system instead of an explicit inverse.
            RC = Re_pred @ C_k
            try:
                K = np.linalg.solve(S.conj().T, RC.conj().T).conj().T
            except np.linalg.LinAlgError:
                # Singular S: retry with a small diagonal regularization.
                S_reg = S + (safe_eps * np.eye(p_dim, dtype=self._dtype))
                K = np.linalg.solve(S_reg.conj().T, RC.conj().T).conj().T

            # --- Update step: x(k|k) and Re(k|k). ---
            self.x = x_pred + (K @ e_k)
            self.Re = (I_n - (K @ C_T_k)) @ Re_pred

            outputs[k, :] = self.x[:, 0]
            errors[k, :] = e_k[:, 0]

            # Snapshot Re(k|k) — surfaced as `coefficients` by _pack_results.
            self.w_history.append(self.Re.copy())

            # Keep self.w mirroring the current state estimate.
            self.w = self.x[:, 0].copy()

            last_K = K
            last_x_pred = x_pred
            last_Re_pred = Re_pred
            last_S = S

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[Kalman] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "kalman_gain_last": last_K,
                "predicted_state_last": None if last_x_pred is None else last_x_pred[:, 0].copy(),
                "predicted_cov_last": last_Re_pred,
                "innovation_cov_last": last_S,
                "safe_eps": float(safe_eps),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="innovation",
            extra=extra,
        )
Kalman Filter for state estimation with complex or real-valued data.
State-space model (standard form)
x(k) = A(k-1) x(k-1) + B(k) n(k)          (state equation)
y(k) = C^T(k) x(k) + n1(k)                (measurement equation)
with covariances: E[n(k) n(k)^H] = Rn(k) and E[n1(k) n1(k)^H] = Rn1(k)
Notes
- This class integrates the Kalman recursion into the library-wide
`AdaptiveFilter`/`OptimizationResult` interface. `self.w` stores the current state estimate x(k|k) as a 1-D vector (n,).
- In this implementation,
`OptimizationResult.coefficients` stores the covariance history Re(k|k) with shape (N, n, n). To achieve this using the standardized `_pack_results`, we store Re(k|k) snapshots in `self.w_history` (overriding the typical meaning of w_history for this non-FIR algorithm).
Parameters
A: State transition matrix A(k-1), shape (n, n), or a sequence over k. C_T: Measurement matrix C^T(k), shape (p, n), or a sequence over k. Rn: Process noise covariance Rn(k), shape (q, q), or a sequence over k. Rn1: Measurement noise covariance Rn1(k), shape (p, p), or a sequence over k. B: Process noise input matrix B(k), shape (n, q). If None, identity is used (q = n). x_init: Initial state estimate x(0|0), shape (n,), (n,1) or (1,n). If None, zeros. Re_init: Initial error covariance Re(0|0), shape (n, n). If None, identity.
Raises
ValueError If the initial shapes are inconsistent.
def __init__(
    self,
    A: Union[np.ndarray, Sequence[np.ndarray]],
    C_T: Union[np.ndarray, Sequence[np.ndarray]],
    Rn: Union[np.ndarray, Sequence[np.ndarray]],
    Rn1: Union[np.ndarray, Sequence[np.ndarray]],
    B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] = None,
    x_init: Optional[np.ndarray] = None,
    Re_init: Optional[np.ndarray] = None,
) -> None:
    """Initialize the Kalman filter's state-space model and internal state.

    Parameters
    ----------
    A:
        State transition matrix A(k-1), shape (n, n), or a sequence over k.
    C_T:
        Measurement matrix C^T(k), shape (p, n), or a sequence over k.
    Rn:
        Process noise covariance Rn(k), shape (q, q), or a sequence over k.
    Rn1:
        Measurement noise covariance Rn1(k), shape (p, p), or a sequence over k.
    B:
        Process noise input matrix B(k), shape (n, q). If None, identity
        is used (q = n).
    x_init:
        Initial state estimate x(0|0), shape (n,), (n,1) or (1,n). If None, zeros.
    Re_init:
        Initial error covariance Re(0|0), shape (n, n). If None, identity.

    Raises
    ------
    ValueError
        If the initial shapes are inconsistent.
    """
    A0 = _mat_at_k(A, 0)
    if A0.ndim != 2 or A0.shape[0] != A0.shape[1]:
        raise ValueError(f"A must be square (n,n). Got {A0.shape}.")
    n = int(A0.shape[0])

    # filter_order = n - 1 so the base class's (order + 1)-length vectors
    # have exactly n entries (one per state component).
    super().__init__(filter_order=n - 1, w_init=None)

    self.A = A
    self.C_T = C_T
    self.Rn = Rn
    self.Rn1 = Rn1
    self.B = B

    # Promote all model matrices to one working dtype.
    # BUG FIX: the previous check (`float64 if floating else complex128`)
    # sent integer-dtype (purely real) inputs to complex128. Only genuinely
    # complex inputs should use complex128; everything real (including
    # integer matrices) runs in float64.
    dtype = np.result_type(
        A0, _mat_at_k(C_T, 0), _mat_at_k(Rn, 0), _mat_at_k(Rn1, 0)
    )
    dtype = (
        np.complex128
        if np.issubdtype(dtype, np.complexfloating)
        else np.float64
    )

    self._dtype = dtype
    self.regressor = np.zeros(self.filter_order + 1, dtype=self._dtype)

    if x_init is None:
        x0 = np.zeros((n, 1), dtype=dtype)
    else:
        x0 = _as_2d_col(np.asarray(x_init, dtype=dtype))
        if x0.shape[0] != n:
            raise ValueError(f"x_init must have length n={n}. Got {x0.shape}.")
    self.x = x0

    if Re_init is None:
        Re0 = np.eye(n, dtype=dtype)
    else:
        Re0 = np.asarray(Re_init, dtype=dtype)
        if Re0.shape != (n, n):
            raise ValueError(f"Re_init must be shape (n,n)={(n,n)}. Got {Re0.shape}.")
    self.Re = Re0

    # `w` mirrors the current state estimate x(k|k) as a 1-D vector.
    # (The redundant zero-initialization of `w` before this line, which was
    # immediately overwritten, has been removed.)
    self.w = self.x[:, 0].copy()

    self.w_history = []
def optimize(
    self,
    input_signal: ArrayLike,
    desired_signal: Optional[ArrayLike] = None,
    verbose: bool = False,
    return_internal_states: bool = False,
    safe_eps: float = 1e-12,
) -> OptimizationResult:
    """Execute the Kalman recursion for a sequence of measurements y(k).

    Parameters
    ----------
    input_signal:
        Measurement sequence y(k). Accepted shapes:
        - (N,) for scalar measurements
        - (N,p) for p-dimensional measurements
        - (N,p,1) also accepted (will be squeezed to (N,p))
    desired_signal:
        Ignored (kept only for API standardization).
    verbose:
        If True, prints runtime.
    return_internal_states:
        If True, returns selected internal values in `result.extra`.
    safe_eps:
        Small positive value used to regularize the innovation covariance
        matrix when a linear solve fails (numerical stabilization).

    Returns
    -------
    OptimizationResult
        outputs:
            State estimates x(k|k), shape (N, n_states).
        errors:
            Innovations e(k) = y(k) - C^T(k) x(k|k-1), shape (N, n_meas).
        coefficients:
            Covariance history Re(k|k), shape (N, n_states, n_states).
        error_type:
            "innovation".

    Extra (when return_internal_states=True)
    ----------------------------------------
    extra["kalman_gain_last"]:
        Kalman gain K at the last iteration.
    extra["predicted_state_last"]:
        Predicted state x(k|k-1) at the last iteration (shape (n,)).
    extra["predicted_cov_last"]:
        Predicted covariance Re(k|k-1) at the last iteration.
    extra["innovation_cov_last"]:
        Innovation covariance S at the last iteration.
    extra["safe_eps"]:
        The stabilization epsilon used.
    """
    t0 = perf_counter()

    # Normalize measurements to shape (N, p) in the filter's working dtype.
    y_mat = _as_meas_matrix(np.asarray(input_signal))
    y_mat = y_mat.astype(self._dtype, copy=False)

    N = int(y_mat.shape[0])      # number of time steps
    n = int(self.x.shape[0])     # state dimension
    p_dim = int(y_mat.shape[1])  # measurement dimension

    outputs = np.zeros((N, n), dtype=self._dtype)
    errors = np.zeros((N, p_dim), dtype=self._dtype)

    I_n = np.eye(n, dtype=self._dtype)

    # Re(k|k) snapshots; _pack_results exposes these as `coefficients`
    # (see the class docstring for this non-FIR use of w_history).
    self.w_history = []

    last_K: Optional[np.ndarray] = None
    last_x_pred: Optional[np.ndarray] = None
    last_Re_pred: Optional[np.ndarray] = None
    last_S: Optional[np.ndarray] = None

    for k in range(N):
        # Resolve (possibly time-varying) model matrices for step k.
        A_k = np.asarray(_mat_at_k(self.A, k), dtype=self._dtype)
        C_T_k = np.asarray(_mat_at_k(self.C_T, k), dtype=self._dtype)
        Rn_k = np.asarray(_mat_at_k(self.Rn, k), dtype=self._dtype)
        Rn1_k = np.asarray(_mat_at_k(self.Rn1, k), dtype=self._dtype)
        B_k = np.asarray(
            _mat_at_k(self.B, k) if self.B is not None else I_n,
            dtype=self._dtype,
        )

        self._validate_step_shapes(A_k, C_T_k, Rn_k, Rn1_k, B_k)

        y_k = _as_2d_col(y_mat[k]).astype(self._dtype, copy=False)
        C_k = C_T_k.conj().T

        # --- Time update (prediction) ---
        x_pred = A_k @ self.x
        Re_pred = (A_k @ self.Re @ A_k.conj().T) + (B_k @ Rn_k @ B_k.conj().T)

        # Innovation: e(k) = y(k) - C^T(k) x(k|k-1).
        e_k = y_k - (C_T_k @ x_pred)

        # Innovation covariance S = C^T Re_pred C + Rn1.
        S = (C_T_k @ Re_pred @ C_k) + Rn1_k

        # Kalman gain K = Re_pred C S^{-1}, computed by solving
        # S^H K^H = (Re_pred C)^H rather than inverting S explicitly.
        RC = Re_pred @ C_k
        try:
            K = np.linalg.solve(S.conj().T, RC.conj().T).conj().T
        except np.linalg.LinAlgError:
            # Singular S: retry once with a small diagonal regularization.
            S_reg = S + (safe_eps * np.eye(p_dim, dtype=self._dtype))
            K = np.linalg.solve(S_reg.conj().T, RC.conj().T).conj().T

        # --- Measurement update (correction) ---
        self.x = x_pred + (K @ e_k)
        self.Re = (I_n - (K @ C_T_k)) @ Re_pred

        outputs[k, :] = self.x[:, 0]
        errors[k, :] = e_k[:, 0]

        self.w_history.append(self.Re.copy())

        # Mirror the current state estimate in the library-standard `w`.
        self.w = self.x[:, 0].copy()

        last_K = K
        last_x_pred = x_pred
        last_Re_pred = Re_pred
        last_S = S

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[Kalman] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "kalman_gain_last": last_K,
            "predicted_state_last": None if last_x_pred is None else last_x_pred[:, 0].copy(),
            "predicted_cov_last": last_Re_pred,
            "innovation_cov_last": last_S,
            "safe_eps": float(safe_eps),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="innovation",
        extra=extra,
    )
Execute the Kalman recursion for a sequence of measurements y(k).
Parameters
input_signal:
Measurement sequence y(k). Accepted shapes:
- (N,) for scalar measurements
- (N,p) for p-dimensional measurements
- (N,p,1) also accepted (will be squeezed to (N,p))
desired_signal:
Ignored (kept only for API standardization).
verbose:
If True, prints runtime.
return_internal_states:
If True, returns selected internal values in result.extra.
safe_eps:
Small positive value used to regularize the innovation covariance
matrix when a linear solve fails (numerical stabilization).
Returns
OptimizationResult outputs: State estimates x(k|k), shape (N, n_states). errors: Innovations e(k) = y(k) - C^T(k) x(k|k-1), shape (N, n_meas). coefficients: Covariance history Re(k|k), shape (N, n_states, n_states). error_type: "innovation".
Extra (when return_internal_states=True)
extra["kalman_gain_last"]: Kalman gain K at the last iteration. extra["predicted_state_last"]: Predicted state x(k|k-1) at the last iteration (shape (n,)). extra["predicted_cov_last"]: Predicted covariance Re(k|k-1) at the last iteration. extra["innovation_cov_last"]: Innovation covariance S at the last iteration. extra["safe_eps"]: The stabilization epsilon used.
def info():
    """Print an overview of the library's algorithm coverage."""
    print("\n" + "="*70)
    print(" PyDaptive Filtering - Complete Library Overview")
    print(" Reference: 'Adaptive Filtering' by Paulo S. R. Diniz")
    print("="*70)
    # Chapter -> algorithms map, following the structure of Diniz's book.
    sections = {
        "Cap 3/4 (LMS)": "LMS, NLMS, Affine Projection, Sign Algorithms, Transform Domain",
        "Cap 5 (RLS)": "Standard RLS, Alternative RLS",
        "Cap 6 (Set-Membership)": "SM-NLMS, BNLMS, SM-AP, Simplified AP/PUAP",
        "Cap 7 (Lattice RLS)": "LRLS (Posteriori, Priori, Error Feedback), NLRLS",
        "Cap 8 (Fast RLS)": "Fast Transversal RLS, Stabilized FTRLS",
        "Cap 9 (QR)": "QR-Decomposition Based RLS",
        # BUG FIX: "Steinglitz-McBride" -> "Steiglitz-McBride", consistent
        # with the exported SteiglitzMcBride class in __all__.
        "Cap 10 (IIR)": "Error Equation, Gauss-Newton, Steiglitz-McBride, RLS-IIR",
        "Cap 11 (Nonlinear)": "Volterra (LMS/RLS), MLP, RBF, Bilinear RLS",
        "Cap 12 (Subband)": "CFDLMS, DLCLLMS, OLSBLMS",
        "Cap 13 (Blind)": "CMA, Godard, Sato, Blind Affine Projection",
        "Cap 17 (Kalman)": "Kalman Filter",
    }
    for cap, algs in sections.items():
        print(f"\n{cap:25}: {algs}")

    print("\n" + "-"*70)
    print("Usage example: from pydaptivefiltering import LMS")
    print("Documentation: help(pydaptivefiltering.LMS)")
    print("="*70 + "\n")
Prints information about the library's algorithm coverage.